diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/MANIFEST.in b/sdk/datalake/azure-mgmt-datalake-analytics/MANIFEST.in
index 1c9ebaab0de4..c0cf549166ee 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/MANIFEST.in
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/MANIFEST.in
@@ -1,4 +1,6 @@
+recursive-include tests *.py *.yaml
 include *.rst
 include azure/__init__.py
 include azure/mgmt/__init__.py
 include azure/mgmt/datalake/__init__.py
+
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/README.rst b/sdk/datalake/azure-mgmt-datalake-analytics/README.rst
index 756ded7f07a9..fb18ab8d347d 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/README.rst
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/README.rst
@@ -6,7 +6,7 @@ This is the Microsoft Azure Data Lake Analytics Management Client Library.
 Azure Resource Manager (ARM) is the next generation of management APIs that
 replace the old Azure Service Management (ASM).
 
-This package has been tested with Python 2.7, 3.4, 3.5, 3.6 and 3.7.
+This package has been tested with Python 2.7, 3.5, 3.6 and 3.7.
 
 For the older Azure Service Management (ASM) libraries, see
 `azure-servicemanagement-legacy <https://pypi.python.org/pypi/azure-servicemanagement-legacy>`__ library.
@@ -14,25 +14,6 @@ For the older Azure Service Management (ASM) libraries, see
 For a more complete set of Azure libraries, see the
 `azure <https://pypi.python.org/pypi/azure>`__ bundle package.
 
-Compatibility
-=============
-
-**IMPORTANT**: If you have an earlier version of the azure package
-(version < 1.0), you should uninstall it before installing this package.
-
-You can check the version using pip:
-
-.. code:: shell
-
-    pip freeze
-
-If you see azure==0.11.0 (or any version below 1.0), uninstall it first:
-
-.. code:: shell
-
-    pip uninstall azure
-
-
 Usage
 =====
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/__init__.py
index 39a62e8061a0..df6262e338cf 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/__init__.py
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/__init__.py
@@ -9,10 +9,11 @@
 # regenerated.
 # --------------------------------------------------------------------------
 
-from .data_lake_analytics_catalog_management_client import DataLakeAnalyticsCatalogManagementClient
-from .version import VERSION
+from ._configuration import DataLakeAnalyticsCatalogManagementClientConfiguration
+from ._data_lake_analytics_catalog_management_client import DataLakeAnalyticsCatalogManagementClient
+__all__ = ['DataLakeAnalyticsCatalogManagementClient', 'DataLakeAnalyticsCatalogManagementClientConfiguration']
 
-__all__ = ['DataLakeAnalyticsCatalogManagementClient']
+from .version import VERSION
 
 __version__ = VERSION
 
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_configuration.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_configuration.py
new file mode 100644
index 000000000000..6a5e01b0c34a
--- /dev/null
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_configuration.py
@@ -0,0 +1,47 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+from msrestazure import AzureConfiguration
+
+from .version import VERSION
+
+
+class DataLakeAnalyticsCatalogManagementClientConfiguration(AzureConfiguration):
+    """Configuration for DataLakeAnalyticsCatalogManagementClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param credentials: Credentials needed for the client to connect to Azure.
+    :type credentials: :mod:`A msrestazure Credentials
+     object<msrestazure.azure_active_directory>`
+    :param adla_catalog_dns_suffix: Gets the DNS suffix used as the base for
+     all Azure Data Lake Analytics Catalog service requests.
+    :type adla_catalog_dns_suffix: str
+    """
+
+    def __init__(
+            self, credentials, adla_catalog_dns_suffix):
+
+        if credentials is None:
+            raise ValueError("Parameter 'credentials' must not be None.")
+        if adla_catalog_dns_suffix is None:
+            raise ValueError("Parameter 'adla_catalog_dns_suffix' must not be None.")
+        base_url = 'https://{accountName}.{adlaCatalogDnsSuffix}'
+
+        super(DataLakeAnalyticsCatalogManagementClientConfiguration, self).__init__(base_url)
+
+        # Starting Autorest.Python 4.0.64, make connection pool activated by default
+        self.keep_alive = True
+
+        self.add_user_agent('azure-mgmt-datalake-analytics/{}'.format(VERSION))
+        self.add_user_agent('Azure-SDK-For-Python')
+
+        self.credentials = credentials
+        self.adla_catalog_dns_suffix = adla_catalog_dns_suffix
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_data_lake_analytics_catalog_management_client.py
similarity index 57%
rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py
rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_data_lake_analytics_catalog_management_client.py
index 626d61e9c2f4..c0433f2b3a17 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/data_lake_analytics_catalog_management_client.py
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/_data_lake_analytics_catalog_management_client.py
@@ -11,41 +11,10 @@
 from msrest.service_client import SDKClient
 from msrest import Serializer, Deserializer
-from msrestazure import AzureConfiguration
 
-from .version import VERSION
-from .operations.catalog_operations import CatalogOperations
-from . import models
-
-
-class DataLakeAnalyticsCatalogManagementClientConfiguration(AzureConfiguration):
-    """Configuration for DataLakeAnalyticsCatalogManagementClient
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-
-    :param credentials: Credentials needed for the client to connect to Azure.
-    :type credentials: :mod:`A msrestazure Credentials
-     object<msrestazure.azure_active_directory>`
-    :param adla_catalog_dns_suffix: Gets the DNS suffix used as the base for
-     all Azure Data Lake Analytics Catalog service requests.
- :type adla_catalog_dns_suffix: str - """ - - def __init__( - self, credentials, adla_catalog_dns_suffix): - if credentials is None: - raise ValueError("Parameter 'credentials' must not be None.") - if adla_catalog_dns_suffix is None: - raise ValueError("Parameter 'adla_catalog_dns_suffix' must not be None.") - base_url = 'https://{accountName}.{adlaCatalogDnsSuffix}' - - super(DataLakeAnalyticsCatalogManagementClientConfiguration, self).__init__(base_url) - - self.add_user_agent('azure-mgmt-datalake-analytics/{}'.format(VERSION)) - self.add_user_agent('Azure-SDK-For-Python') - - self.credentials = credentials - self.adla_catalog_dns_suffix = adla_catalog_dns_suffix +from ._configuration import DataLakeAnalyticsCatalogManagementClientConfiguration +from .operations import CatalogOperations +from . import models class DataLakeAnalyticsCatalogManagementClient(SDKClient): diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py index 031c65b77ffa..2e06a035b7ea 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/__init__.py @@ -10,141 +10,141 @@ # -------------------------------------------------------------------------- try: - from .acl_create_or_update_parameters_py3 import AclCreateOrUpdateParameters - from .acl_delete_parameters_py3 import AclDeleteParameters - from .acl_py3 import Acl - from .data_lake_analytics_catalog_secret_create_or_update_parameters_py3 import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters - from .data_lake_analytics_catalog_credential_create_parameters_py3 import DataLakeAnalyticsCatalogCredentialCreateParameters - from .data_lake_analytics_catalog_credential_delete_parameters_py3 import DataLakeAnalyticsCatalogCredentialDeleteParameters - from .data_lake_analytics_catalog_credential_update_parameters_py3 import DataLakeAnalyticsCatalogCredentialUpdateParameters - from .usql_secret_py3 import USqlSecret - from .usql_external_data_source_py3 import USqlExternalDataSource - from .usql_credential_py3 import USqlCredential - from .usql_procedure_py3 import USqlProcedure - from .usql_table_column_py3 import USqlTableColumn - from .usql_directed_column_py3 import USqlDirectedColumn - from .usql_distribution_info_py3 import USqlDistributionInfo - from .usql_index_py3 import USqlIndex - from .ddl_name_py3 import DdlName - from .entity_id_py3 import EntityId - from .external_table_py3 import ExternalTable - from .type_field_info_py3 import TypeFieldInfo - from .usql_table_preview_py3 import USqlTablePreview - from .usql_table_py3 import USqlTable - from .usql_table_fragment_py3 import USqlTableFragment - from .usql_table_type_py3 import USqlTableType - from .usql_view_py3 import USqlView - from .usql_package_py3 import USqlPackage - from .usql_table_partition_py3 import USqlTablePartition - from .usql_table_statistics_py3 import USqlTableStatistics - from .usql_type_py3 import USqlType - from .usql_table_valued_function_py3 import USqlTableValuedFunction - from .usql_assembly_file_info_py3 import USqlAssemblyFileInfo - from .usql_assembly_dependency_info_py3 import USqlAssemblyDependencyInfo - from .usql_assembly_py3 import USqlAssembly - from .usql_assembly_clr_py3 import USqlAssemblyClr - from .usql_schema_py3 import USqlSchema - from .usql_database_py3 
import USqlDatabase - from .catalog_item_py3 import CatalogItem - from .catalog_item_list_py3 import CatalogItemList + from ._models_py3 import Acl + from ._models_py3 import AclCreateOrUpdateParameters + from ._models_py3 import AclDeleteParameters + from ._models_py3 import CatalogItem + from ._models_py3 import CatalogItemList + from ._models_py3 import DataLakeAnalyticsCatalogCredentialCreateParameters + from ._models_py3 import DataLakeAnalyticsCatalogCredentialDeleteParameters + from ._models_py3 import DataLakeAnalyticsCatalogCredentialUpdateParameters + from ._models_py3 import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters + from ._models_py3 import DdlName + from ._models_py3 import EntityId + from ._models_py3 import ExternalTable + from ._models_py3 import TypeFieldInfo + from ._models_py3 import USqlAssembly + from ._models_py3 import USqlAssemblyClr + from ._models_py3 import USqlAssemblyDependencyInfo + from ._models_py3 import USqlAssemblyFileInfo + from ._models_py3 import USqlCredential + from ._models_py3 import USqlDatabase + from ._models_py3 import USqlDirectedColumn + from ._models_py3 import USqlDistributionInfo + from ._models_py3 import USqlExternalDataSource + from ._models_py3 import USqlIndex + from ._models_py3 import USqlPackage + from ._models_py3 import USqlProcedure + from ._models_py3 import USqlSchema + from ._models_py3 import USqlSecret + from ._models_py3 import USqlTable + from ._models_py3 import USqlTableColumn + from ._models_py3 import USqlTableFragment + from ._models_py3 import USqlTablePartition + from ._models_py3 import USqlTablePreview + from ._models_py3 import USqlTableStatistics + from ._models_py3 import USqlTableType + from ._models_py3 import USqlTableValuedFunction + from ._models_py3 import USqlType + from ._models_py3 import USqlView except (SyntaxError, ImportError): - from .acl_create_or_update_parameters import AclCreateOrUpdateParameters - from .acl_delete_parameters import AclDeleteParameters - from .acl import Acl - from .data_lake_analytics_catalog_secret_create_or_update_parameters import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters - from .data_lake_analytics_catalog_credential_create_parameters import DataLakeAnalyticsCatalogCredentialCreateParameters - from .data_lake_analytics_catalog_credential_delete_parameters import DataLakeAnalyticsCatalogCredentialDeleteParameters - from .data_lake_analytics_catalog_credential_update_parameters import DataLakeAnalyticsCatalogCredentialUpdateParameters - from .usql_secret import USqlSecret - from .usql_external_data_source import USqlExternalDataSource - from .usql_credential import USqlCredential - from .usql_procedure import USqlProcedure - from .usql_table_column import USqlTableColumn - from .usql_directed_column import USqlDirectedColumn - from .usql_distribution_info import USqlDistributionInfo - from .usql_index import USqlIndex - from .ddl_name import DdlName - from .entity_id import EntityId - from .external_table import ExternalTable - from .type_field_info import TypeFieldInfo - from .usql_table_preview import USqlTablePreview - from .usql_table import USqlTable - from .usql_table_fragment import USqlTableFragment - from .usql_table_type import USqlTableType - from .usql_view import USqlView - from .usql_package import USqlPackage - from .usql_table_partition import USqlTablePartition - from .usql_table_statistics import USqlTableStatistics - from .usql_type import USqlType - from .usql_table_valued_function import USqlTableValuedFunction - from 
.usql_assembly_file_info import USqlAssemblyFileInfo - from .usql_assembly_dependency_info import USqlAssemblyDependencyInfo - from .usql_assembly import USqlAssembly - from .usql_assembly_clr import USqlAssemblyClr - from .usql_schema import USqlSchema - from .usql_database import USqlDatabase - from .catalog_item import CatalogItem - from .catalog_item_list import CatalogItemList -from .usql_credential_paged import USqlCredentialPaged -from .usql_external_data_source_paged import USqlExternalDataSourcePaged -from .usql_procedure_paged import USqlProcedurePaged -from .usql_table_fragment_paged import USqlTableFragmentPaged -from .usql_table_paged import USqlTablePaged -from .usql_table_statistics_paged import USqlTableStatisticsPaged -from .usql_table_type_paged import USqlTableTypePaged -from .usql_package_paged import USqlPackagePaged -from .usql_view_paged import USqlViewPaged -from .usql_table_partition_paged import USqlTablePartitionPaged -from .usql_type_paged import USqlTypePaged -from .usql_table_valued_function_paged import USqlTableValuedFunctionPaged -from .usql_assembly_clr_paged import USqlAssemblyClrPaged -from .usql_schema_paged import USqlSchemaPaged -from .acl_paged import AclPaged -from .usql_database_paged import USqlDatabasePaged -from .data_lake_analytics_catalog_management_client_enums import ( + from ._models import Acl + from ._models import AclCreateOrUpdateParameters + from ._models import AclDeleteParameters + from ._models import CatalogItem + from ._models import CatalogItemList + from ._models import DataLakeAnalyticsCatalogCredentialCreateParameters + from ._models import DataLakeAnalyticsCatalogCredentialDeleteParameters + from ._models import DataLakeAnalyticsCatalogCredentialUpdateParameters + from ._models import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters + from ._models import DdlName + from ._models import EntityId + from ._models import ExternalTable + from ._models import TypeFieldInfo + from ._models import USqlAssembly + from ._models import USqlAssemblyClr + from ._models import USqlAssemblyDependencyInfo + from ._models import USqlAssemblyFileInfo + from ._models import USqlCredential + from ._models import USqlDatabase + from ._models import USqlDirectedColumn + from ._models import USqlDistributionInfo + from ._models import USqlExternalDataSource + from ._models import USqlIndex + from ._models import USqlPackage + from ._models import USqlProcedure + from ._models import USqlSchema + from ._models import USqlSecret + from ._models import USqlTable + from ._models import USqlTableColumn + from ._models import USqlTableFragment + from ._models import USqlTablePartition + from ._models import USqlTablePreview + from ._models import USqlTableStatistics + from ._models import USqlTableType + from ._models import USqlTableValuedFunction + from ._models import USqlType + from ._models import USqlView +from ._paged_models import AclPaged +from ._paged_models import USqlAssemblyClrPaged +from ._paged_models import USqlCredentialPaged +from ._paged_models import USqlDatabasePaged +from ._paged_models import USqlExternalDataSourcePaged +from ._paged_models import USqlPackagePaged +from ._paged_models import USqlProcedurePaged +from ._paged_models import USqlSchemaPaged +from ._paged_models import USqlTableFragmentPaged +from ._paged_models import USqlTablePaged +from ._paged_models import USqlTablePartitionPaged +from ._paged_models import USqlTableStatisticsPaged +from ._paged_models import USqlTableTypePaged +from ._paged_models import 
USqlTableValuedFunctionPaged +from ._paged_models import USqlTypePaged +from ._paged_models import USqlViewPaged +from ._data_lake_analytics_catalog_management_client_enums import ( AclType, PermissionType, FileType, ) __all__ = [ + 'Acl', 'AclCreateOrUpdateParameters', 'AclDeleteParameters', - 'Acl', - 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters', + 'CatalogItem', + 'CatalogItemList', 'DataLakeAnalyticsCatalogCredentialCreateParameters', 'DataLakeAnalyticsCatalogCredentialDeleteParameters', 'DataLakeAnalyticsCatalogCredentialUpdateParameters', - 'USqlSecret', - 'USqlExternalDataSource', - 'USqlCredential', - 'USqlProcedure', - 'USqlTableColumn', - 'USqlDirectedColumn', - 'USqlDistributionInfo', - 'USqlIndex', + 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters', 'DdlName', 'EntityId', 'ExternalTable', 'TypeFieldInfo', - 'USqlTablePreview', + 'USqlAssembly', + 'USqlAssemblyClr', + 'USqlAssemblyDependencyInfo', + 'USqlAssemblyFileInfo', + 'USqlCredential', + 'USqlDatabase', + 'USqlDirectedColumn', + 'USqlDistributionInfo', + 'USqlExternalDataSource', + 'USqlIndex', + 'USqlPackage', + 'USqlProcedure', + 'USqlSchema', + 'USqlSecret', 'USqlTable', + 'USqlTableColumn', 'USqlTableFragment', - 'USqlTableType', - 'USqlView', - 'USqlPackage', 'USqlTablePartition', + 'USqlTablePreview', 'USqlTableStatistics', - 'USqlType', + 'USqlTableType', 'USqlTableValuedFunction', - 'USqlAssemblyFileInfo', - 'USqlAssemblyDependencyInfo', - 'USqlAssembly', - 'USqlAssemblyClr', - 'USqlSchema', - 'USqlDatabase', - 'CatalogItem', - 'CatalogItemList', + 'USqlType', + 'USqlView', 'USqlCredentialPaged', 'USqlExternalDataSourcePaged', 'USqlProcedurePaged', diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_data_lake_analytics_catalog_management_client_enums.py similarity index 100% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_management_client_enums.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_data_lake_analytics_catalog_management_client_enums.py diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models.py new file mode 100644 index 000000000000..c12e9df50c7c --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models.py @@ -0,0 +1,1334 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Acl(Model): + """A Data Lake Analytics catalog access control list (ACL) entry. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar ace_type: the access control list (ACL) entry type. UserObj and + GroupObj denote the owning user and group, respectively. 
Possible values + include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :vartype ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :ivar principal_id: the Azure AD object ID of the user or group being + specified in the access control list (ACL) entry. + :vartype principal_id: str + :ivar permission: the permission type of the access control list (ACL) + entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter', + 'Write', 'All' + :vartype permission: str or + ~azure.mgmt.datalake.analytics.catalog.models.PermissionType + """ + + _validation = { + 'ace_type': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'permission': {'readonly': True}, + } + + _attribute_map = { + 'ace_type': {'key': 'aceType', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'permission': {'key': 'permission', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Acl, self).__init__(**kwargs) + self.ace_type = None + self.principal_id = None + self.permission = None + + +class AclCreateOrUpdateParameters(Model): + """The parameters used to create or update an access control list (ACL) entry. + + All required parameters must be populated in order to send to Azure. + + :param ace_type: Required. the access control list (ACL) entry type. + UserObj and GroupObj denote the owning user and group, respectively. + Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :type ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :param principal_id: Required. the Azure AD object ID of the user or group + being specified in the access control list (ACL) entry. + :type principal_id: str + :param permission: Required. the permission type of the access control + list (ACL) entry. Possible values include: 'None', 'Use', 'Create', + 'Drop', 'Alter', 'Write', 'All' + :type permission: str or + ~azure.mgmt.datalake.analytics.catalog.models.PermissionType + """ + + _validation = { + 'ace_type': {'required': True}, + 'principal_id': {'required': True}, + 'permission': {'required': True}, + } + + _attribute_map = { + 'ace_type': {'key': 'aceType', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'permission': {'key': 'permission', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AclCreateOrUpdateParameters, self).__init__(**kwargs) + self.ace_type = kwargs.get('ace_type', None) + self.principal_id = kwargs.get('principal_id', None) + self.permission = kwargs.get('permission', None) + + +class AclDeleteParameters(Model): + """The parameters used to delete an access control list (ACL) entry. + + All required parameters must be populated in order to send to Azure. + + :param ace_type: Required. the access control list (ACL) entry type. + UserObj and GroupObj denote the owning user and group, respectively. + Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :type ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :param principal_id: Required. the Azure AD object ID of the user or group + being specified in the access control list (ACL) entry. 
+    :type principal_id: str
+    """
+
+    _validation = {
+        'ace_type': {'required': True},
+        'principal_id': {'required': True},
+    }
+
+    _attribute_map = {
+        'ace_type': {'key': 'aceType', 'type': 'str'},
+        'principal_id': {'key': 'principalId', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(AclDeleteParameters, self).__init__(**kwargs)
+        self.ace_type = kwargs.get('ace_type', None)
+        self.principal_id = kwargs.get('principal_id', None)
+
+
+class CatalogItem(Model):
+    """A Data Lake Analytics catalog item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(CatalogItem, self).__init__(**kwargs)
+        self.compute_account_name = kwargs.get('compute_account_name', None)
+        self.version = kwargs.get('version', None)
+
+
+class CatalogItemList(Model):
+    """A Data Lake Analytics catalog item list.
+
+    :param next_link: the link to the next page of results.
+    :type next_link: str
+    """
+
+    _attribute_map = {
+        'next_link': {'key': 'nextLink', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(CatalogItemList, self).__init__(**kwargs)
+        self.next_link = kwargs.get('next_link', None)
+
+
+class CloudError(Model):
+    """CloudError.
+    """
+
+    _attribute_map = {
+    }
+
+
+class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
+    """Data Lake Analytics catalog credential creation parameters.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the credential and user with
+     access to the data source.
+    :type password: str
+    :param uri: Required. the URI identifier for the data source this
+     credential can connect to in the format <hostname>:<port number>
+    :type uri: str
+    :param user_id: Required. the object identifier for the user associated
+     with this credential with access to the data source.
+    :type user_id: str
+    """
+
+    _validation = {
+        'password': {'required': True},
+        'uri': {'required': True},
+        'user_id': {'required': True},
+    }
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+        'user_id': {'key': 'userId', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.uri = kwargs.get('uri', None)
+        self.user_id = kwargs.get('user_id', None)
+
+
+class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model):
+    """Data Lake Analytics catalog credential deletion parameters.
+
+    :param password: the current password for the credential and user with
+     access to the data source. This is required if the requester is not the
+     account owner.
+    :type password: str
+    """
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+
+
+class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model):
+    """Data Lake Analytics catalog credential update parameters.
+
+    :param password: the current password for the credential and user with
+     access to the data source. This is required if the requester is not the
+     account owner.
+    :type password: str
+    :param new_password: the new password for the credential and user with
+     access to the data source.
+    :type new_password: str
+    :param uri: the URI identifier for the data source this credential can
+     connect to in the format <hostname>:<port number>
+    :type uri: str
+    :param user_id: the object identifier for the user associated with this
+     credential with access to the data source.
+    :type user_id: str
+    """
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'new_password': {'key': 'newPassword', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+        'user_id': {'key': 'userId', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.new_password = kwargs.get('new_password', None)
+        self.uri = kwargs.get('uri', None)
+        self.user_id = kwargs.get('user_id', None)
+
+
+class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model):
+    """Data Lake Analytics catalog secret creation and update parameters. This is
+    deprecated and will be removed in the next release. Please use
+    DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param password: Required. the password for the secret to pass in
+    :type password: str
+    :param uri: the URI identifier for the secret in the format
+     <hostname>:<port number>
+    :type uri: str
+    """
+
+    _validation = {
+        'password': {'required': True},
+    }
+
+    _attribute_map = {
+        'password': {'key': 'password', 'type': 'str'},
+        'uri': {'key': 'uri', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs)
+        self.password = kwargs.get('password', None)
+        self.uri = kwargs.get('uri', None)
+
+
+class DdlName(Model):
+    """A Data Lake Analytics DDL name item.
+
+    :param first_part: the name of the table associated with this database and
+     schema.
+    :type first_part: str
+    :param second_part: the name of the table associated with this database
+     and schema.
+    :type second_part: str
+    :param third_part: the name of the table associated with this database and
+     schema.
+    :type third_part: str
+    :param server: the name of the table associated with this database and
+     schema.
+    :type server: str
+    """
+
+    _attribute_map = {
+        'first_part': {'key': 'firstPart', 'type': 'str'},
+        'second_part': {'key': 'secondPart', 'type': 'str'},
+        'third_part': {'key': 'thirdPart', 'type': 'str'},
+        'server': {'key': 'server', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(DdlName, self).__init__(**kwargs)
+        self.first_part = kwargs.get('first_part', None)
+        self.second_part = kwargs.get('second_part', None)
+        self.third_part = kwargs.get('third_part', None)
+        self.server = kwargs.get('server', None)
+
+
+class EntityId(Model):
+    """A Data Lake Analytics catalog entity identifier object.
+
+    :param name: the name of the external table associated with this database,
+     schema and table.
+    :type name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName
+    :param version: the version of the external data source.
+ :type version: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'DdlName'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EntityId, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.version = kwargs.get('version', None) + + +class ExternalTable(Model): + """A Data Lake Analytics catalog external table item. + + :param table_name: the name of the table associated with this database and + schema. + :type table_name: str + :param data_source: the data source associated with this external table. + :type data_source: ~azure.mgmt.datalake.analytics.catalog.models.EntityId + """ + + _attribute_map = { + 'table_name': {'key': 'tableName', 'type': 'str'}, + 'data_source': {'key': 'dataSource', 'type': 'EntityId'}, + } + + def __init__(self, **kwargs): + super(ExternalTable, self).__init__(**kwargs) + self.table_name = kwargs.get('table_name', None) + self.data_source = kwargs.get('data_source', None) + + +class TypeFieldInfo(Model): + """A Data Lake Analytics catalog type field information item. + + :param name: the name of the field associated with this type. + :type name: str + :param type: the type of the field. + :type type: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(TypeFieldInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + + +class USqlAssembly(CatalogItem): + """A Data Lake Analytics catalog U-SQL Assembly. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the assembly. + :type name: str + :param clr_name: the name of the CLR. + :type clr_name: str + :param is_visible: the switch indicating if this assembly is visible or + not. + :type is_visible: bool + :param is_user_defined: the switch indicating if this assembly is user + defined or not. 
+ :type is_user_defined: bool + :param files: the list of files associated with the assembly + :type files: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo] + :param dependencies: the list of dependencies associated with the assembly + :type dependencies: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'assemblyName', 'type': 'str'}, + 'clr_name': {'key': 'clrName', 'type': 'str'}, + 'is_visible': {'key': 'isVisible', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'}, + 'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'}, + } + + def __init__(self, **kwargs): + super(USqlAssembly, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) + self.clr_name = kwargs.get('clr_name', None) + self.is_visible = kwargs.get('is_visible', None) + self.is_user_defined = kwargs.get('is_user_defined', None) + self.files = kwargs.get('files', None) + self.dependencies = kwargs.get('dependencies', None) + + +class USqlAssemblyClr(CatalogItem): + """A Data Lake Analytics catalog U-SQL assembly CLR item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the assembly. + :type name: str + :param clr_name: the name of the CLR. + :type clr_name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'assemblyClrName', 'type': 'str'}, + 'clr_name': {'key': 'clrName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlAssemblyClr, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) + self.clr_name = kwargs.get('clr_name', None) + + +class USqlAssemblyDependencyInfo(Model): + """A Data Lake Analytics catalog U-SQL dependency information item. + + :param entity_id: the EntityId of the dependency. + :type entity_id: ~azure.mgmt.datalake.analytics.catalog.models.EntityId + """ + + _attribute_map = { + 'entity_id': {'key': 'entityId', 'type': 'EntityId'}, + } + + def __init__(self, **kwargs): + super(USqlAssemblyDependencyInfo, self).__init__(**kwargs) + self.entity_id = kwargs.get('entity_id', None) + + +class USqlAssemblyFileInfo(Model): + """A Data Lake Analytics catalog U-SQL assembly file information item. + + :param type: the assembly file type. Possible values include: 'Assembly', + 'Resource', 'Nodeploy' + :type type: str or ~azure.mgmt.datalake.analytics.catalog.models.FileType + :param original_path: The original path to the assembly file. + :type original_path: str + :param content_path: The content path to the assembly file. 
+ :type content_path: str + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'original_path': {'key': 'originalPath', 'type': 'str'}, + 'content_path': {'key': 'contentPath', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlAssemblyFileInfo, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.original_path = kwargs.get('original_path', None) + self.content_path = kwargs.get('content_path', None) + + +class USqlCredential(CatalogItem): + """A Data Lake Analytics catalog U-SQL credential item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param name: the name of the credential. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'credentialName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlCredential, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + + +class USqlDatabase(CatalogItem): + """A Data Lake Analytics catalog U-SQL database item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param name: the name of the database. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'databaseName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlDatabase, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + + +class USqlDirectedColumn(Model): + """A Data Lake Analytics catalog U-SQL directed column item. + + :param name: the name of the index in the table. + :type name: str + :param descending: the switch indicating if the index is descending or + not. + :type descending: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'descending': {'key': 'descending', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(USqlDirectedColumn, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.descending = kwargs.get('descending', None) + + +class USqlDistributionInfo(Model): + """A Data Lake Analytics catalog U-SQL distribution information object. + + :param type: the type of this distribution. + :type type: int + :param keys: the list of directed columns in the distribution + :type keys: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] + :param count: the count of indices using this distribution. + :type count: int + :param dynamic_count: the dynamic count of indices using this + distribution. + :type dynamic_count: int + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'int'}, + 'keys': {'key': 'keys', 'type': '[USqlDirectedColumn]'}, + 'count': {'key': 'count', 'type': 'int'}, + 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(USqlDistributionInfo, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.keys = kwargs.get('keys', None) + self.count = kwargs.get('count', None) + self.dynamic_count = kwargs.get('dynamic_count', None) + + +class USqlExternalDataSource(CatalogItem): + """A Data Lake Analytics catalog U-SQL external datasource item. 
+ + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the external data source. + :type name: str + :param provider: the name of the provider for the external data source. + :type provider: str + :param provider_string: the name of the provider string for the external + data source. + :type provider_string: str + :param pushdown_types: the list of types to push down from the external + data source. + :type pushdown_types: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'externalDataSourceName', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'provider_string': {'key': 'providerString', 'type': 'str'}, + 'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(USqlExternalDataSource, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.name = kwargs.get('name', None) + self.provider = kwargs.get('provider', None) + self.provider_string = kwargs.get('provider_string', None) + self.pushdown_types = kwargs.get('pushdown_types', None) + + +class USqlIndex(Model): + """A Data Lake Analytics catalog U-SQL table index item. + + :param name: the name of the index in the table. + :type name: str + :param index_keys: the list of directed columns in the index + :type index_keys: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] + :param columns: the list of columns in the index + :type columns: list[str] + :param distribution_info: the distributions info of the index + :type distribution_info: + ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo + :param partition_function: partition function ID for the index. + :type partition_function: str + :param partition_key_list: the list of partition keys in the index + :type partition_key_list: list[str] + :param stream_names: the list of full paths to the streams that contain + this index in the DataLake account. + :type stream_names: list[str] + :param is_columnstore: the switch indicating if this index is a + columnstore index. + :type is_columnstore: bool + :param index_id: the ID of this index within the table. + :type index_id: int + :param is_unique: the switch indicating if this index is a unique index. 
+ :type is_unique: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'index_keys': {'key': 'indexKeys', 'type': '[USqlDirectedColumn]'}, + 'columns': {'key': 'columns', 'type': '[str]'}, + 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, + 'partition_function': {'key': 'partitionFunction', 'type': 'str'}, + 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, + 'stream_names': {'key': 'streamNames', 'type': '[str]'}, + 'is_columnstore': {'key': 'isColumnstore', 'type': 'bool'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'is_unique': {'key': 'isUnique', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(USqlIndex, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.index_keys = kwargs.get('index_keys', None) + self.columns = kwargs.get('columns', None) + self.distribution_info = kwargs.get('distribution_info', None) + self.partition_function = kwargs.get('partition_function', None) + self.partition_key_list = kwargs.get('partition_key_list', None) + self.stream_names = kwargs.get('stream_names', None) + self.is_columnstore = kwargs.get('is_columnstore', None) + self.index_id = kwargs.get('index_id', None) + self.is_unique = kwargs.get('is_unique', None) + + +class USqlPackage(CatalogItem): + """A Data Lake Analytics catalog U-SQL package item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database containing the package. + :type database_name: str + :param schema_name: the name of the schema associated with this package + and database. + :type schema_name: str + :param name: the name of the package. + :type name: str + :param definition: the definition of the package. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'packageName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlPackage, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) + + +class USqlProcedure(CatalogItem): + """A Data Lake Analytics catalog U-SQL procedure item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this procedure + and database. + :type schema_name: str + :param name: the name of the procedure. + :type name: str + :param definition: the defined query of the procedure. 
+    :type definition: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+        'database_name': {'key': 'databaseName', 'type': 'str'},
+        'schema_name': {'key': 'schemaName', 'type': 'str'},
+        'name': {'key': 'procName', 'type': 'str'},
+        'definition': {'key': 'definition', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(USqlProcedure, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.schema_name = kwargs.get('schema_name', None)
+        self.name = kwargs.get('name', None)
+        self.definition = kwargs.get('definition', None)
+
+
+class USqlSchema(CatalogItem):
+    """A Data Lake Analytics catalog U-SQL schema item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database.
+    :type database_name: str
+    :param name: the name of the schema.
+    :type name: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+        'database_name': {'key': 'databaseName', 'type': 'str'},
+        'name': {'key': 'schemaName', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(USqlSchema, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.name = kwargs.get('name', None)
+
+
+class USqlSecret(CatalogItem):
+    """A Data Lake Analytics catalog U-SQL secret item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database.
+    :type database_name: str
+    :param name: the name of the secret.
+    :type name: str
+    :param creation_time: the creation time of the credential object. This is
+     the only information returned about a secret from a GET.
+    :type creation_time: datetime
+    :param uri: the URI identifier for the secret in the format
+     <hostname>:<port number>
+    :type uri: str
+    :param password: the password for the secret to pass in
+    :type password: str
+    """
+
+    _attribute_map = {
+        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
+        'version': {'key': 'version', 'type': 'str'},
+        'database_name': {'key': 'databaseName', 'type': 'str'},
+        'name': {'key': 'secretName', 'type': 'str'},
+        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
+        'uri': {'key': 'uri', 'type': 'str'},
+        'password': {'key': 'password', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs):
+        super(USqlSecret, self).__init__(**kwargs)
+        self.database_name = kwargs.get('database_name', None)
+        self.name = kwargs.get('name', None)
+        self.creation_time = kwargs.get('creation_time', None)
+        self.uri = kwargs.get('uri', None)
+        self.password = kwargs.get('password', None)
+
+
+class USqlTable(CatalogItem):
+    """A Data Lake Analytics catalog U-SQL table item.
+
+    :param compute_account_name: the name of the Data Lake Analytics account.
+    :type compute_account_name: str
+    :param version: the version of the catalog item.
+    :type version: str
+    :param database_name: the name of the database.
+    :type database_name: str
+    :param schema_name: the name of the schema associated with this table and
+     database.
+    :type schema_name: str
+    :param name: the name of the table.
+ :type name: str + :param column_list: the list of columns in this table + :type column_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] + :param index_list: the list of indices in this table + :type index_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex] + :param partition_key_list: the list of partition keys in the table + :type partition_key_list: list[str] + :param external_table: the external table associated with the table. + :type external_table: + ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable + :param distribution_info: the distributions info of the table + :type distribution_info: + ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tableName', 'type': 'str'}, + 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, + 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, + 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, + 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, + 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, + } + + def __init__(self, **kwargs): + super(USqlTable, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.column_list = kwargs.get('column_list', None) + self.index_list = kwargs.get('index_list', None) + self.partition_key_list = kwargs.get('partition_key_list', None) + self.external_table = kwargs.get('external_table', None) + self.distribution_info = kwargs.get('distribution_info', None) + + +class USqlTableColumn(Model): + """A Data Lake Analytics catalog U-SQL table column item. + + :param name: the name of the column in the table. + :type name: str + :param type: the object type of the specified column (such as + System.String). + :type type: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlTableColumn, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + + +class USqlTableFragment(Model): + """A Data Lake Analytics catalog U-SQL table fragment item. + + :param parent_id: the parent object Id of the table fragment. The parent + could be a table or table partition. + :type parent_id: str + :param fragment_id: the version of the catalog item. + :type fragment_id: str + :param index_id: the ordinal of the index which contains the table + fragment. + :type index_id: int + :param size: the data size of the table fragment in bytes. + :type size: long + :param row_count: the number of rows in the table fragment. + :type row_count: long + :param create_date: the creation time of the table fragment. + :type create_date: datetime + :param stream_path: the relative path for the table fragment location. 
+ :type stream_path: str + """ + + _attribute_map = { + 'parent_id': {'key': 'parentId', 'type': 'str'}, + 'fragment_id': {'key': 'fragmentId', 'type': 'str'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'size': {'key': 'size', 'type': 'long'}, + 'row_count': {'key': 'rowCount', 'type': 'long'}, + 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, + 'stream_path': {'key': 'streamPath', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlTableFragment, self).__init__(**kwargs) + self.parent_id = kwargs.get('parent_id', None) + self.fragment_id = kwargs.get('fragment_id', None) + self.index_id = kwargs.get('index_id', None) + self.size = kwargs.get('size', None) + self.row_count = kwargs.get('row_count', None) + self.create_date = kwargs.get('create_date', None) + self.stream_path = kwargs.get('stream_path', None) + + +class USqlTablePartition(CatalogItem): + """A Data Lake Analytics catalog U-SQL table partition item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table + partition and database. + :type schema_name: str + :param name: the name of the table partition. + :type name: str + :param parent_name: the Ddl object of the partition's parent. + :type parent_name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName + :param index_id: the index ID for this partition. + :type index_id: int + :param label: the list of labels associated with this partition. + :type label: list[str] + :param create_date: the creation time of the partition + :type create_date: datetime + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'partitionName', 'type': 'str'}, + 'parent_name': {'key': 'parentName', 'type': 'DdlName'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'label': {'key': 'label', 'type': '[str]'}, + 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(USqlTablePartition, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.parent_name = kwargs.get('parent_name', None) + self.index_id = kwargs.get('index_id', None) + self.label = kwargs.get('label', None) + self.create_date = kwargs.get('create_date', None) + + +class USqlTablePreview(Model): + """A Data Lake Analytics catalog table or partition preview rows item. + + :param total_row_count: the total number of rows in the table or + partition. + :type total_row_count: long + :param total_column_count: the total number of columns in the table or + partition. + :type total_column_count: long + :param rows: the rows of the table or partition preview, where each row is + an array of string representations the row's values. Note: Byte arrays + will appear as base-64 encoded values, SqlMap and SqlArray objects will + appear as escaped JSON objects, and DateTime objects will appear as ISO + formatted UTC date-times. 
+ :type rows: list[list[str]] + :param truncated: true if the amount of data in the response is less than + expected due to the preview operation's size limitations. This can occur + if the requested rows or row counts are too large. + :type truncated: bool + :param schema: the schema of the table or partition. + :type schema: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] + """ + + _attribute_map = { + 'total_row_count': {'key': 'totalRowCount', 'type': 'long'}, + 'total_column_count': {'key': 'totalColumnCount', 'type': 'long'}, + 'rows': {'key': 'rows', 'type': '[[str]]'}, + 'truncated': {'key': 'truncated', 'type': 'bool'}, + 'schema': {'key': 'schema', 'type': '[USqlTableColumn]'}, + } + + def __init__(self, **kwargs): + super(USqlTablePreview, self).__init__(**kwargs) + self.total_row_count = kwargs.get('total_row_count', None) + self.total_column_count = kwargs.get('total_column_count', None) + self.rows = kwargs.get('rows', None) + self.truncated = kwargs.get('truncated', None) + self.schema = kwargs.get('schema', None) + + +class USqlTableStatistics(CatalogItem): + """A Data Lake Analytics catalog U-SQL table statistics item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param table_name: the name of the table. + :type table_name: str + :param name: the name of the table statistics. + :type name: str + :param user_stat_name: the name of the user statistics. + :type user_stat_name: str + :param stat_data_path: the path to the statistics data. + :type stat_data_path: str + :param create_time: the creation time of the statistics. + :type create_time: datetime + :param update_time: the last time the statistics were updated. + :type update_time: datetime + :param is_user_created: the switch indicating if these statistics are user + created. + :type is_user_created: bool + :param is_auto_created: the switch indicating if these statistics are + automatically created. + :type is_auto_created: bool + :param has_filter: the switch indicating if these statistics have a + filter. + :type has_filter: bool + :param filter_definition: the filter definition for the statistics. + :type filter_definition: str + :param col_names: the list of column names associated with these + statistics. 
+ :type col_names: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'table_name': {'key': 'tableName', 'type': 'str'}, + 'name': {'key': 'statisticsName', 'type': 'str'}, + 'user_stat_name': {'key': 'userStatName', 'type': 'str'}, + 'stat_data_path': {'key': 'statDataPath', 'type': 'str'}, + 'create_time': {'key': 'createTime', 'type': 'iso-8601'}, + 'update_time': {'key': 'updateTime', 'type': 'iso-8601'}, + 'is_user_created': {'key': 'isUserCreated', 'type': 'bool'}, + 'is_auto_created': {'key': 'isAutoCreated', 'type': 'bool'}, + 'has_filter': {'key': 'hasFilter', 'type': 'bool'}, + 'filter_definition': {'key': 'filterDefinition', 'type': 'str'}, + 'col_names': {'key': 'colNames', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(USqlTableStatistics, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.table_name = kwargs.get('table_name', None) + self.name = kwargs.get('name', None) + self.user_stat_name = kwargs.get('user_stat_name', None) + self.stat_data_path = kwargs.get('stat_data_path', None) + self.create_time = kwargs.get('create_time', None) + self.update_time = kwargs.get('update_time', None) + self.is_user_created = kwargs.get('is_user_created', None) + self.is_auto_created = kwargs.get('is_auto_created', None) + self.has_filter = kwargs.get('has_filter', None) + self.filter_definition = kwargs.get('filter_definition', None) + self.col_names = kwargs.get('col_names', None) + + +class USqlType(CatalogItem): + """A Data Lake Analytics catalog U-SQL type item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of type for this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: The switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: The switch indicating if this type is user + defined. + :type is_user_defined: bool + :param is_assembly_type: The switch indicating if this type is an assembly + type. + :type is_assembly_type: bool + :param is_table_type: The switch indicating if this type is a table type. + :type is_table_type: bool + :param is_complex_type: The switch indicating if this type is a complex + type. 
+ :type is_complex_type: bool + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(USqlType, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.type_family = kwargs.get('type_family', None) + self.c_sharp_name = kwargs.get('c_sharp_name', None) + self.full_csharp_name = kwargs.get('full_csharp_name', None) + self.system_type_id = kwargs.get('system_type_id', None) + self.user_type_id = kwargs.get('user_type_id', None) + self.schema_id = kwargs.get('schema_id', None) + self.principal_id = kwargs.get('principal_id', None) + self.is_nullable = kwargs.get('is_nullable', None) + self.is_user_defined = kwargs.get('is_user_defined', None) + self.is_assembly_type = kwargs.get('is_assembly_type', None) + self.is_table_type = kwargs.get('is_table_type', None) + self.is_complex_type = kwargs.get('is_complex_type', None) + + +class USqlTableType(USqlType): + """A Data Lake Analytics catalog U-SQL table type item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of type for this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: The switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: The switch indicating if this type is user + defined. + :type is_user_defined: bool + :param is_assembly_type: The switch indicating if this type is an assembly + type. 
+ :type is_assembly_type: bool + :param is_table_type: The switch indicating if this type is a table type. + :type is_table_type: bool + :param is_complex_type: The switch indicating if this type is a complex + type. + :type is_complex_type: bool + :ivar columns: the type field information associated with this table type. + :vartype columns: + list[~azure.mgmt.datalake.analytics.catalog.models.TypeFieldInfo] + """ + + _validation = { + 'columns': {'readonly': True}, + } + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, + } + + def __init__(self, **kwargs): + super(USqlTableType, self).__init__(**kwargs) + self.columns = None + + +class USqlTableValuedFunction(CatalogItem): + """A Data Lake Analytics catalog U-SQL table valued function item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this database. + :type schema_name: str + :param name: the name of the table valued function. + :type name: str + :param definition: the definition of the table valued function. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tvfName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlTableValuedFunction, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) + + +class USqlView(CatalogItem): + """A Data Lake Analytics catalog U-SQL view item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this view and + database. + :type schema_name: str + :param name: the name of the view. + :type name: str + :param definition: the defined query of the view. 
+ :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'viewName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(USqlView, self).__init__(**kwargs) + self.database_name = kwargs.get('database_name', None) + self.schema_name = kwargs.get('schema_name', None) + self.name = kwargs.get('name', None) + self.definition = kwargs.get('definition', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models_py3.py new file mode 100644 index 000000000000..4066cfbc7f08 --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_models_py3.py @@ -0,0 +1,1334 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Acl(Model): + """A Data Lake Analytics catalog access control list (ACL) entry. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar ace_type: the access control list (ACL) entry type. UserObj and + GroupObj denote the owning user and group, respectively. Possible values + include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :vartype ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :ivar principal_id: the Azure AD object ID of the user or group being + specified in the access control list (ACL) entry. + :vartype principal_id: str + :ivar permission: the permission type of the access control list (ACL) + entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter', + 'Write', 'All' + :vartype permission: str or + ~azure.mgmt.datalake.analytics.catalog.models.PermissionType + """ + + _validation = { + 'ace_type': {'readonly': True}, + 'principal_id': {'readonly': True}, + 'permission': {'readonly': True}, + } + + _attribute_map = { + 'ace_type': {'key': 'aceType', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'permission': {'key': 'permission', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(Acl, self).__init__(**kwargs) + self.ace_type = None + self.principal_id = None + self.permission = None + + +class AclCreateOrUpdateParameters(Model): + """The parameters used to create or update an access control list (ACL) entry. + + All required parameters must be populated in order to send to Azure. + + :param ace_type: Required. the access control list (ACL) entry type. + UserObj and GroupObj denote the owning user and group, respectively. + Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :type ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :param principal_id: Required. 
the Azure AD object ID of the user or group + being specified in the access control list (ACL) entry. + :type principal_id: str + :param permission: Required. the permission type of the access control + list (ACL) entry. Possible values include: 'None', 'Use', 'Create', + 'Drop', 'Alter', 'Write', 'All' + :type permission: str or + ~azure.mgmt.datalake.analytics.catalog.models.PermissionType + """ + + _validation = { + 'ace_type': {'required': True}, + 'principal_id': {'required': True}, + 'permission': {'required': True}, + } + + _attribute_map = { + 'ace_type': {'key': 'aceType', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + 'permission': {'key': 'permission', 'type': 'str'}, + } + + def __init__(self, *, ace_type, principal_id: str, permission, **kwargs) -> None: + super(AclCreateOrUpdateParameters, self).__init__(**kwargs) + self.ace_type = ace_type + self.principal_id = principal_id + self.permission = permission + + +class AclDeleteParameters(Model): + """The parameters used to delete an access control list (ACL) entry. + + All required parameters must be populated in order to send to Azure. + + :param ace_type: Required. the access control list (ACL) entry type. + UserObj and GroupObj denote the owning user and group, respectively. + Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' + :type ace_type: str or + ~azure.mgmt.datalake.analytics.catalog.models.AclType + :param principal_id: Required. the Azure AD object ID of the user or group + being specified in the access control list (ACL) entry. + :type principal_id: str + """ + + _validation = { + 'ace_type': {'required': True}, + 'principal_id': {'required': True}, + } + + _attribute_map = { + 'ace_type': {'key': 'aceType', 'type': 'str'}, + 'principal_id': {'key': 'principalId', 'type': 'str'}, + } + + def __init__(self, *, ace_type, principal_id: str, **kwargs) -> None: + super(AclDeleteParameters, self).__init__(**kwargs) + self.ace_type = ace_type + self.principal_id = principal_id + + +class CatalogItem(Model): + """A Data Lake Analytics catalog item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, **kwargs) -> None: + super(CatalogItem, self).__init__(**kwargs) + self.compute_account_name = compute_account_name + self.version = version + + +class CatalogItemList(Model): + """A Data Lake Analytics catalog item list. + + :param next_link: the link to the next page of results. + :type next_link: str + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + } + + def __init__(self, *, next_link: str=None, **kwargs) -> None: + super(CatalogItemList, self).__init__(**kwargs) + self.next_link = next_link + + +class CloudError(Model): + """CloudError. + """ + + _attribute_map = { + } + + +class DataLakeAnalyticsCatalogCredentialCreateParameters(Model): + """Data Lake Analytics catalog credential creation parameters. + + All required parameters must be populated in order to send to Azure. + + :param password: Required. the password for the credential and user with + access to the data source. + :type password: str + :param uri: Required. 
the URI identifier for the data source this + credential can connect to in the format : + :type uri: str + :param user_id: Required. the object identifier for the user associated + with this credential with access to the data source. + :type user_id: str + """ + + _validation = { + 'password': {'required': True}, + 'uri': {'required': True}, + 'user_id': {'required': True}, + } + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'uri': {'key': 'uri', 'type': 'str'}, + 'user_id': {'key': 'userId', 'type': 'str'}, + } + + def __init__(self, *, password: str, uri: str, user_id: str, **kwargs) -> None: + super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs) + self.password = password + self.uri = uri + self.user_id = user_id + + +class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model): + """Data Lake Analytics catalog credential deletion parameters. + + :param password: the current password for the credential and user with + access to the data source. This is required if the requester is not the + account owner. + :type password: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, **kwargs) -> None: + super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs) + self.password = password + + +class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model): + """Data Lake Analytics catalog credential update parameters. + + :param password: the current password for the credential and user with + access to the data source. This is required if the requester is not the + account owner. + :type password: str + :param new_password: the new password for the credential and user with + access to the data source. + :type new_password: str + :param uri: the URI identifier for the data source this credential can + connect to in the format : + :type uri: str + :param user_id: the object identifier for the user associated with this + credential with access to the data source. + :type user_id: str + """ + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'new_password': {'key': 'newPassword', 'type': 'str'}, + 'uri': {'key': 'uri', 'type': 'str'}, + 'user_id': {'key': 'userId', 'type': 'str'}, + } + + def __init__(self, *, password: str=None, new_password: str=None, uri: str=None, user_id: str=None, **kwargs) -> None: + super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs) + self.password = password + self.new_password = new_password + self.uri = uri + self.user_id = user_id + + +class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model): + """Data Lake Analytics catalog secret creation and update parameters. This is + deprecated and will be removed in the next release. Please use + DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead. + + All required parameters must be populated in order to send to Azure. + + :param password: Required. 
the password for the secret to pass in + :type password: str + :param uri: the URI identifier for the secret in the format + : + :type uri: str + """ + + _validation = { + 'password': {'required': True}, + } + + _attribute_map = { + 'password': {'key': 'password', 'type': 'str'}, + 'uri': {'key': 'uri', 'type': 'str'}, + } + + def __init__(self, *, password: str, uri: str=None, **kwargs) -> None: + super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs) + self.password = password + self.uri = uri + + +class DdlName(Model): + """A Data Lake Analytics DDL name item. + + :param first_part: the name of the table associated with this database and + schema. + :type first_part: str + :param second_part: the name of the table associated with this database + and schema. + :type second_part: str + :param third_part: the name of the table associated with this database and + schema. + :type third_part: str + :param server: the name of the table associated with this database and + schema. + :type server: str + """ + + _attribute_map = { + 'first_part': {'key': 'firstPart', 'type': 'str'}, + 'second_part': {'key': 'secondPart', 'type': 'str'}, + 'third_part': {'key': 'thirdPart', 'type': 'str'}, + 'server': {'key': 'server', 'type': 'str'}, + } + + def __init__(self, *, first_part: str=None, second_part: str=None, third_part: str=None, server: str=None, **kwargs) -> None: + super(DdlName, self).__init__(**kwargs) + self.first_part = first_part + self.second_part = second_part + self.third_part = third_part + self.server = server + + +class EntityId(Model): + """A Data Lake Analytics catalog entity identifier object. + + :param name: the name of the external table associated with this database, + schema and table. + :type name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName + :param version: the version of the external data source. + :type version: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'DdlName'}, + 'version': {'key': 'version', 'type': 'str'}, + } + + def __init__(self, *, name=None, version: str=None, **kwargs) -> None: + super(EntityId, self).__init__(**kwargs) + self.name = name + self.version = version + + +class ExternalTable(Model): + """A Data Lake Analytics catalog external table item. + + :param table_name: the name of the table associated with this database and + schema. + :type table_name: str + :param data_source: the data source associated with this external table. + :type data_source: ~azure.mgmt.datalake.analytics.catalog.models.EntityId + """ + + _attribute_map = { + 'table_name': {'key': 'tableName', 'type': 'str'}, + 'data_source': {'key': 'dataSource', 'type': 'EntityId'}, + } + + def __init__(self, *, table_name: str=None, data_source=None, **kwargs) -> None: + super(ExternalTable, self).__init__(**kwargs) + self.table_name = table_name + self.data_source = data_source + + +class TypeFieldInfo(Model): + """A Data Lake Analytics catalog type field information item. + + :param name: the name of the field associated with this type. + :type name: str + :param type: the type of the field. + :type type: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None: + super(TypeFieldInfo, self).__init__(**kwargs) + self.name = name + self.type = type + + +class USqlAssembly(CatalogItem): + """A Data Lake Analytics catalog U-SQL Assembly. 
+ + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the assembly. + :type name: str + :param clr_name: the name of the CLR. + :type clr_name: str + :param is_visible: the switch indicating if this assembly is visible or + not. + :type is_visible: bool + :param is_user_defined: the switch indicating if this assembly is user + defined or not. + :type is_user_defined: bool + :param files: the list of files associated with the assembly + :type files: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo] + :param dependencies: the list of dependencies associated with the assembly + :type dependencies: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'assemblyName', 'type': 'str'}, + 'clr_name': {'key': 'clrName', 'type': 'str'}, + 'is_visible': {'key': 'isVisible', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'}, + 'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, is_visible: bool=None, is_user_defined: bool=None, files=None, dependencies=None, **kwargs) -> None: + super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.clr_name = clr_name + self.is_visible = is_visible + self.is_user_defined = is_user_defined + self.files = files + self.dependencies = dependencies + + +class USqlAssemblyClr(CatalogItem): + """A Data Lake Analytics catalog U-SQL assembly CLR item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the assembly. + :type name: str + :param clr_name: the name of the CLR. + :type clr_name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'assemblyClrName', 'type': 'str'}, + 'clr_name': {'key': 'clrName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, **kwargs) -> None: + super(USqlAssemblyClr, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.clr_name = clr_name + + +class USqlAssemblyDependencyInfo(Model): + """A Data Lake Analytics catalog U-SQL dependency information item. + + :param entity_id: the EntityId of the dependency. 
+ :type entity_id: ~azure.mgmt.datalake.analytics.catalog.models.EntityId + """ + + _attribute_map = { + 'entity_id': {'key': 'entityId', 'type': 'EntityId'}, + } + + def __init__(self, *, entity_id=None, **kwargs) -> None: + super(USqlAssemblyDependencyInfo, self).__init__(**kwargs) + self.entity_id = entity_id + + +class USqlAssemblyFileInfo(Model): + """A Data Lake Analytics catalog U-SQL assembly file information item. + + :param type: the assembly file type. Possible values include: 'Assembly', + 'Resource', 'Nodeploy' + :type type: str or ~azure.mgmt.datalake.analytics.catalog.models.FileType + :param original_path: The original path to the assembly file. + :type original_path: str + :param content_path: The content path to the assembly file. + :type content_path: str + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'original_path': {'key': 'originalPath', 'type': 'str'}, + 'content_path': {'key': 'contentPath', 'type': 'str'}, + } + + def __init__(self, *, type=None, original_path: str=None, content_path: str=None, **kwargs) -> None: + super(USqlAssemblyFileInfo, self).__init__(**kwargs) + self.type = type + self.original_path = original_path + self.content_path = content_path + + +class USqlCredential(CatalogItem): + """A Data Lake Analytics catalog U-SQL credential item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param name: the name of the credential. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'credentialName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: + super(USqlCredential, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.name = name + + +class USqlDatabase(CatalogItem): + """A Data Lake Analytics catalog U-SQL database item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param name: the name of the database. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'name': {'key': 'databaseName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: + super(USqlDatabase, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.name = name + + +class USqlDirectedColumn(Model): + """A Data Lake Analytics catalog U-SQL directed column item. + + :param name: the name of the index in the table. + :type name: str + :param descending: the switch indicating if the index is descending or + not. + :type descending: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'descending': {'key': 'descending', 'type': 'bool'}, + } + + def __init__(self, *, name: str=None, descending: bool=None, **kwargs) -> None: + super(USqlDirectedColumn, self).__init__(**kwargs) + self.name = name + self.descending = descending + + +class USqlDistributionInfo(Model): + """A Data Lake Analytics catalog U-SQL distribution information object. + + :param type: the type of this distribution. 
+ :type type: int + :param keys: the list of directed columns in the distribution + :type keys: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] + :param count: the count of indices using this distribution. + :type count: int + :param dynamic_count: the dynamic count of indices using this + distribution. + :type dynamic_count: int + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'int'}, + 'keys': {'key': 'keys', 'type': '[USqlDirectedColumn]'}, + 'count': {'key': 'count', 'type': 'int'}, + 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, + } + + def __init__(self, *, type: int=None, keys=None, count: int=None, dynamic_count: int=None, **kwargs) -> None: + super(USqlDistributionInfo, self).__init__(**kwargs) + self.type = type + self.keys = keys + self.count = count + self.dynamic_count = dynamic_count + + +class USqlExternalDataSource(CatalogItem): + """A Data Lake Analytics catalog U-SQL external datasource item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the external data source. + :type name: str + :param provider: the name of the provider for the external data source. + :type provider: str + :param provider_string: the name of the provider string for the external + data source. + :type provider_string: str + :param pushdown_types: the list of types to push down from the external + data source. + :type pushdown_types: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'externalDataSourceName', 'type': 'str'}, + 'provider': {'key': 'provider', 'type': 'str'}, + 'provider_string': {'key': 'providerString', 'type': 'str'}, + 'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, provider: str=None, provider_string: str=None, pushdown_types=None, **kwargs) -> None: + super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.provider = provider + self.provider_string = provider_string + self.pushdown_types = pushdown_types + + +class USqlIndex(Model): + """A Data Lake Analytics catalog U-SQL table index item. + + :param name: the name of the index in the table. + :type name: str + :param index_keys: the list of directed columns in the index + :type index_keys: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] + :param columns: the list of columns in the index + :type columns: list[str] + :param distribution_info: the distributions info of the index + :type distribution_info: + ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo + :param partition_function: partition function ID for the index. + :type partition_function: str + :param partition_key_list: the list of partition keys in the index + :type partition_key_list: list[str] + :param stream_names: the list of full paths to the streams that contain + this index in the DataLake account. 
+ :type stream_names: list[str] + :param is_columnstore: the switch indicating if this index is a + columnstore index. + :type is_columnstore: bool + :param index_id: the ID of this index within the table. + :type index_id: int + :param is_unique: the switch indicating if this index is a unique index. + :type is_unique: bool + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'index_keys': {'key': 'indexKeys', 'type': '[USqlDirectedColumn]'}, + 'columns': {'key': 'columns', 'type': '[str]'}, + 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, + 'partition_function': {'key': 'partitionFunction', 'type': 'str'}, + 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, + 'stream_names': {'key': 'streamNames', 'type': '[str]'}, + 'is_columnstore': {'key': 'isColumnstore', 'type': 'bool'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'is_unique': {'key': 'isUnique', 'type': 'bool'}, + } + + def __init__(self, *, name: str=None, index_keys=None, columns=None, distribution_info=None, partition_function: str=None, partition_key_list=None, stream_names=None, is_columnstore: bool=None, index_id: int=None, is_unique: bool=None, **kwargs) -> None: + super(USqlIndex, self).__init__(**kwargs) + self.name = name + self.index_keys = index_keys + self.columns = columns + self.distribution_info = distribution_info + self.partition_function = partition_function + self.partition_key_list = partition_key_list + self.stream_names = stream_names + self.is_columnstore = is_columnstore + self.index_id = index_id + self.is_unique = is_unique + + +class USqlPackage(CatalogItem): + """A Data Lake Analytics catalog U-SQL package item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database containing the package. + :type database_name: str + :param schema_name: the name of the schema associated with this package + and database. + :type schema_name: str + :param name: the name of the package. + :type name: str + :param definition: the definition of the package. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'packageName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlPackage, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition + + +class USqlProcedure(CatalogItem): + """A Data Lake Analytics catalog U-SQL procedure item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this procedure + and database. + :type schema_name: str + :param name: the name of the procedure. 
+ :type name: str + :param definition: the defined query of the procedure. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'procName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlProcedure, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition + + +class USqlSchema(CatalogItem): + """A Data Lake Analytics catalog U-SQL schema item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the schema. + :type name: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'schemaName', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, **kwargs) -> None: + super(USqlSchema, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + + +class USqlSecret(CatalogItem): + """A Data Lake Analytics catalog U-SQL secret item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param name: the name of the secret. + :type name: str + :param creation_time: the creation time of the credential object. This is + the only information returned about a secret from a GET. + :type creation_time: datetime + :param uri: the URI identifier for the secret in the format + : + :type uri: str + :param password: the password for the secret to pass in + :type password: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'name': {'key': 'secretName', 'type': 'str'}, + 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'}, + 'uri': {'key': 'uri', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, creation_time=None, uri: str=None, password: str=None, **kwargs) -> None: + super(USqlSecret, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.name = name + self.creation_time = creation_time + self.uri = uri + self.password = password + + +class USqlTable(CatalogItem): + """A Data Lake Analytics catalog U-SQL table item. 
+ + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of the table. + :type name: str + :param column_list: the list of columns in this table + :type column_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] + :param index_list: the list of indices in this table + :type index_list: + list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex] + :param partition_key_list: the list of partition keys in the table + :type partition_key_list: list[str] + :param external_table: the external table associated with the table. + :type external_table: + ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable + :param distribution_info: the distributions info of the table + :type distribution_info: + ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tableName', 'type': 'str'}, + 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, + 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, + 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, + 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, + 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, column_list=None, index_list=None, partition_key_list=None, external_table=None, distribution_info=None, **kwargs) -> None: + super(USqlTable, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.column_list = column_list + self.index_list = index_list + self.partition_key_list = partition_key_list + self.external_table = external_table + self.distribution_info = distribution_info + + +class USqlTableColumn(Model): + """A Data Lake Analytics catalog U-SQL table column item. + + :param name: the name of the column in the table. + :type name: str + :param type: the object type of the specified column (such as + System.String). + :type type: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None: + super(USqlTableColumn, self).__init__(**kwargs) + self.name = name + self.type = type + + +class USqlTableFragment(Model): + """A Data Lake Analytics catalog U-SQL table fragment item. + + :param parent_id: the parent object Id of the table fragment. The parent + could be a table or table partition. + :type parent_id: str + :param fragment_id: the version of the catalog item. + :type fragment_id: str + :param index_id: the ordinal of the index which contains the table + fragment. + :type index_id: int + :param size: the data size of the table fragment in bytes. 
+ :type size: long + :param row_count: the number of rows in the table fragment. + :type row_count: long + :param create_date: the creation time of the table fragment. + :type create_date: datetime + :param stream_path: the relative path for the table fragment location. + :type stream_path: str + """ + + _attribute_map = { + 'parent_id': {'key': 'parentId', 'type': 'str'}, + 'fragment_id': {'key': 'fragmentId', 'type': 'str'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'size': {'key': 'size', 'type': 'long'}, + 'row_count': {'key': 'rowCount', 'type': 'long'}, + 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, + 'stream_path': {'key': 'streamPath', 'type': 'str'}, + } + + def __init__(self, *, parent_id: str=None, fragment_id: str=None, index_id: int=None, size: int=None, row_count: int=None, create_date=None, stream_path: str=None, **kwargs) -> None: + super(USqlTableFragment, self).__init__(**kwargs) + self.parent_id = parent_id + self.fragment_id = fragment_id + self.index_id = index_id + self.size = size + self.row_count = row_count + self.create_date = create_date + self.stream_path = stream_path + + +class USqlTablePartition(CatalogItem): + """A Data Lake Analytics catalog U-SQL table partition item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table + partition and database. + :type schema_name: str + :param name: the name of the table partition. + :type name: str + :param parent_name: the Ddl object of the partition's parent. + :type parent_name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName + :param index_id: the index ID for this partition. + :type index_id: int + :param label: the list of labels associated with this partition. + :type label: list[str] + :param create_date: the creation time of the partition + :type create_date: datetime + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'partitionName', 'type': 'str'}, + 'parent_name': {'key': 'parentName', 'type': 'DdlName'}, + 'index_id': {'key': 'indexId', 'type': 'int'}, + 'label': {'key': 'label', 'type': '[str]'}, + 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, parent_name=None, index_id: int=None, label=None, create_date=None, **kwargs) -> None: + super(USqlTablePartition, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.parent_name = parent_name + self.index_id = index_id + self.label = label + self.create_date = create_date + + +class USqlTablePreview(Model): + """A Data Lake Analytics catalog table or partition preview rows item. + + :param total_row_count: the total number of rows in the table or + partition. + :type total_row_count: long + :param total_column_count: the total number of columns in the table or + partition. 
+ :type total_column_count: long
+ :param rows: the rows of the table or partition preview, where each row is
+ an array of string representations of the row's values. Note: Byte arrays
+ will appear as base-64 encoded values, SqlMap and SqlArray objects will
+ appear as escaped JSON objects, and DateTime objects will appear as ISO
+ formatted UTC date-times.
+ :type rows: list[list[str]]
+ :param truncated: true if the amount of data in the response is less than
+ expected due to the preview operation's size limitations. This can occur
+ if the requested rows or row counts are too large.
+ :type truncated: bool
+ :param schema: the schema of the table or partition.
+ :type schema:
+ list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn]
+ """
+
+ _attribute_map = {
+ 'total_row_count': {'key': 'totalRowCount', 'type': 'long'},
+ 'total_column_count': {'key': 'totalColumnCount', 'type': 'long'},
+ 'rows': {'key': 'rows', 'type': '[[str]]'},
+ 'truncated': {'key': 'truncated', 'type': 'bool'},
+ 'schema': {'key': 'schema', 'type': '[USqlTableColumn]'},
+ }
+
+ def __init__(self, *, total_row_count: int=None, total_column_count: int=None, rows=None, truncated: bool=None, schema=None, **kwargs) -> None:
+ super(USqlTablePreview, self).__init__(**kwargs)
+ self.total_row_count = total_row_count
+ self.total_column_count = total_column_count
+ self.rows = rows
+ self.truncated = truncated
+ self.schema = schema
+
+
+class USqlTableStatistics(CatalogItem):
+ """A Data Lake Analytics catalog U-SQL table statistics item.
+
+ :param compute_account_name: the name of the Data Lake Analytics account.
+ :type compute_account_name: str
+ :param version: the version of the catalog item.
+ :type version: str
+ :param database_name: the name of the database.
+ :type database_name: str
+ :param schema_name: the name of the schema associated with this table and
+ database.
+ :type schema_name: str
+ :param table_name: the name of the table.
+ :type table_name: str
+ :param name: the name of the table statistics.
+ :type name: str
+ :param user_stat_name: the name of the user statistics.
+ :type user_stat_name: str
+ :param stat_data_path: the path to the statistics data.
+ :type stat_data_path: str
+ :param create_time: the creation time of the statistics.
+ :type create_time: datetime
+ :param update_time: the last time the statistics were updated.
+ :type update_time: datetime
+ :param is_user_created: the switch indicating if these statistics are user
+ created.
+ :type is_user_created: bool
+ :param is_auto_created: the switch indicating if these statistics are
+ automatically created.
+ :type is_auto_created: bool
+ :param has_filter: the switch indicating if these statistics have a
+ filter.
+ :type has_filter: bool
+ :param filter_definition: the filter definition for the statistics.
+ :type filter_definition: str
+ :param col_names: the list of column names associated with these
+ statistics.
+ :type col_names: list[str] + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'table_name': {'key': 'tableName', 'type': 'str'}, + 'name': {'key': 'statisticsName', 'type': 'str'}, + 'user_stat_name': {'key': 'userStatName', 'type': 'str'}, + 'stat_data_path': {'key': 'statDataPath', 'type': 'str'}, + 'create_time': {'key': 'createTime', 'type': 'iso-8601'}, + 'update_time': {'key': 'updateTime', 'type': 'iso-8601'}, + 'is_user_created': {'key': 'isUserCreated', 'type': 'bool'}, + 'is_auto_created': {'key': 'isAutoCreated', 'type': 'bool'}, + 'has_filter': {'key': 'hasFilter', 'type': 'bool'}, + 'filter_definition': {'key': 'filterDefinition', 'type': 'str'}, + 'col_names': {'key': 'colNames', 'type': '[str]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, table_name: str=None, name: str=None, user_stat_name: str=None, stat_data_path: str=None, create_time=None, update_time=None, is_user_created: bool=None, is_auto_created: bool=None, has_filter: bool=None, filter_definition: str=None, col_names=None, **kwargs) -> None: + super(USqlTableStatistics, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.table_name = table_name + self.name = name + self.user_stat_name = user_stat_name + self.stat_data_path = stat_data_path + self.create_time = create_time + self.update_time = update_time + self.is_user_created = is_user_created + self.is_auto_created = is_auto_created + self.has_filter = has_filter + self.filter_definition = filter_definition + self.col_names = col_names + + +class USqlType(CatalogItem): + """A Data Lake Analytics catalog U-SQL type item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of type for this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: The switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: The switch indicating if this type is user + defined. + :type is_user_defined: bool + :param is_assembly_type: The switch indicating if this type is an assembly + type. + :type is_assembly_type: bool + :param is_table_type: The switch indicating if this type is a table type. + :type is_table_type: bool + :param is_complex_type: The switch indicating if this type is a complex + type. 
+ :type is_complex_type: bool + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: + super(USqlType, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.type_family = type_family + self.c_sharp_name = c_sharp_name + self.full_csharp_name = full_csharp_name + self.system_type_id = system_type_id + self.user_type_id = user_type_id + self.schema_id = schema_id + self.principal_id = principal_id + self.is_nullable = is_nullable + self.is_user_defined = is_user_defined + self.is_assembly_type = is_assembly_type + self.is_table_type = is_table_type + self.is_complex_type = is_complex_type + + +class USqlTableType(USqlType): + """A Data Lake Analytics catalog U-SQL table type item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this table and + database. + :type schema_name: str + :param name: the name of type for this type. + :type name: str + :param type_family: the type family for this type. + :type type_family: str + :param c_sharp_name: the C# name for this type. + :type c_sharp_name: str + :param full_csharp_name: the fully qualified C# name for this type. + :type full_csharp_name: str + :param system_type_id: the system type ID for this type. + :type system_type_id: int + :param user_type_id: the user type ID for this type. + :type user_type_id: int + :param schema_id: the schema ID for this type. + :type schema_id: int + :param principal_id: the principal ID for this type. + :type principal_id: int + :param is_nullable: The switch indicating if this type is nullable. + :type is_nullable: bool + :param is_user_defined: The switch indicating if this type is user + defined. 
+ :type is_user_defined: bool + :param is_assembly_type: The switch indicating if this type is an assembly + type. + :type is_assembly_type: bool + :param is_table_type: The switch indicating if this type is a table type. + :type is_table_type: bool + :param is_complex_type: The switch indicating if this type is a complex + type. + :type is_complex_type: bool + :ivar columns: the type field information associated with this table type. + :vartype columns: + list[~azure.mgmt.datalake.analytics.catalog.models.TypeFieldInfo] + """ + + _validation = { + 'columns': {'readonly': True}, + } + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'typeName', 'type': 'str'}, + 'type_family': {'key': 'typeFamily', 'type': 'str'}, + 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, + 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, + 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, + 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, + 'schema_id': {'key': 'schemaId', 'type': 'int'}, + 'principal_id': {'key': 'principalId', 'type': 'int'}, + 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, + 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, + 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, + 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, + 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, + 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: + super(USqlTableType, self).__init__(compute_account_name=compute_account_name, version=version, database_name=database_name, schema_name=schema_name, name=name, type_family=type_family, c_sharp_name=c_sharp_name, full_csharp_name=full_csharp_name, system_type_id=system_type_id, user_type_id=user_type_id, schema_id=schema_id, principal_id=principal_id, is_nullable=is_nullable, is_user_defined=is_user_defined, is_assembly_type=is_assembly_type, is_table_type=is_table_type, is_complex_type=is_complex_type, **kwargs) + self.columns = None + + +class USqlTableValuedFunction(CatalogItem): + """A Data Lake Analytics catalog U-SQL table valued function item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this database. + :type schema_name: str + :param name: the name of the table valued function. + :type name: str + :param definition: the definition of the table valued function. 
+ :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'tvfName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlTableValuedFunction, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition + + +class USqlView(CatalogItem): + """A Data Lake Analytics catalog U-SQL view item. + + :param compute_account_name: the name of the Data Lake Analytics account. + :type compute_account_name: str + :param version: the version of the catalog item. + :type version: str + :param database_name: the name of the database. + :type database_name: str + :param schema_name: the name of the schema associated with this view and + database. + :type schema_name: str + :param name: the name of the view. + :type name: str + :param definition: the defined query of the view. + :type definition: str + """ + + _attribute_map = { + 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, + 'version': {'key': 'version', 'type': 'str'}, + 'database_name': {'key': 'databaseName', 'type': 'str'}, + 'schema_name': {'key': 'schemaName', 'type': 'str'}, + 'name': {'key': 'viewName', 'type': 'str'}, + 'definition': {'key': 'definition', 'type': 'str'}, + } + + def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: + super(USqlView, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) + self.database_name = database_name + self.schema_name = schema_name + self.name = name + self.definition = definition diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_paged_models.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_paged_models.py new file mode 100644 index 000000000000..012fddfbb5e9 --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/_paged_models.py @@ -0,0 +1,222 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
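The Paged containers defined below are consumed by plain iteration: each container deserializes one page of results and follows nextLink transparently when the page is exhausted. A minimal consumption sketch (the account, database, and schema names are hypothetical, and creds stands in for a real msrestazure credentials object):

.. code:: python

    from azure.mgmt.datalake.analytics.catalog import (
        DataLakeAnalyticsCatalogManagementClient,
    )

    # 'creds' is a placeholder for a valid msrestazure credentials object.
    client = DataLakeAnalyticsCatalogManagementClient(
        creds, 'azuredatalakeanalytics.net')

    # list_tables returns a USqlTablePaged; iterating it yields
    # deserialized USqlTable models across page boundaries.
    for table in client.catalog.list_tables('myadlaaccount', 'mydb', 'dbo'):
        print(table.name)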
+# --------------------------------------------------------------------------
+
+from msrest.paging import Paged
+
+
+class USqlCredentialPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlCredential <azure.mgmt.datalake.analytics.catalog.models.USqlCredential>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlCredential]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlCredentialPaged, self).__init__(*args, **kwargs)
+
+
+class USqlExternalDataSourcePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlExternalDataSource <azure.mgmt.datalake.analytics.catalog.models.USqlExternalDataSource>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlExternalDataSource]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlExternalDataSourcePaged, self).__init__(*args, **kwargs)
+
+
+class USqlProcedurePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlProcedure <azure.mgmt.datalake.analytics.catalog.models.USqlProcedure>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlProcedure]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlProcedurePaged, self).__init__(*args, **kwargs)
+
+
+class USqlTableFragmentPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTableFragment <azure.mgmt.datalake.analytics.catalog.models.USqlTableFragment>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTableFragment]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTableFragmentPaged, self).__init__(*args, **kwargs)
+
+
+class USqlTablePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTable <azure.mgmt.datalake.analytics.catalog.models.USqlTable>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTable]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTablePaged, self).__init__(*args, **kwargs)
+
+
+class USqlTableStatisticsPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTableStatistics <azure.mgmt.datalake.analytics.catalog.models.USqlTableStatistics>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTableStatistics]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTableStatisticsPaged, self).__init__(*args, **kwargs)
+
+
+class USqlTableTypePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTableType <azure.mgmt.datalake.analytics.catalog.models.USqlTableType>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTableType]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTableTypePaged, self).__init__(*args, **kwargs)
+
+
+class USqlPackagePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlPackage <azure.mgmt.datalake.analytics.catalog.models.USqlPackage>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlPackage]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlPackagePaged, self).__init__(*args, **kwargs)
+
+
+class USqlViewPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlView <azure.mgmt.datalake.analytics.catalog.models.USqlView>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlView]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlViewPaged, self).__init__(*args, **kwargs)
+
+
+class USqlTablePartitionPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTablePartition <azure.mgmt.datalake.analytics.catalog.models.USqlTablePartition>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTablePartition]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTablePartitionPaged, self).__init__(*args, **kwargs)
+
+
+class USqlTypePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlType <azure.mgmt.datalake.analytics.catalog.models.USqlType>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlType]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTypePaged, self).__init__(*args, **kwargs)
+
+
+class USqlTableValuedFunctionPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlTableValuedFunction <azure.mgmt.datalake.analytics.catalog.models.USqlTableValuedFunction>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlTableValuedFunction]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlTableValuedFunctionPaged, self).__init__(*args, **kwargs)
+
+
+class USqlAssemblyClrPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlAssemblyClr <azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyClr>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlAssemblyClr]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlAssemblyClrPaged, self).__init__(*args, **kwargs)
+
+
+class USqlSchemaPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlSchema <azure.mgmt.datalake.analytics.catalog.models.USqlSchema>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlSchema]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlSchemaPaged, self).__init__(*args, **kwargs)
+
+
+class AclPaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`Acl <azure.mgmt.datalake.analytics.catalog.models.Acl>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[Acl]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(AclPaged, self).__init__(*args, **kwargs)
+
+
+class USqlDatabasePaged(Paged):
+ """
+ A paging container for iterating over a list of :class:`USqlDatabase <azure.mgmt.datalake.analytics.catalog.models.USqlDatabase>` objects
+ """
+
+ _attribute_map = {
+ 'next_link': {'key': 'nextLink', 'type': 'str'},
+ 'current_page': {'key': 'value', 'type': '[USqlDatabase]'}
+ }
+
+ def __init__(self, *args, **kwargs):
+
+ super(USqlDatabasePaged, self).__init__(*args, **kwargs)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py
deleted file mode 100644
index c3a76d792cf7..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class Acl(Model):
- """A Data Lake Analytics catalog access control list (ACL) entry.
- - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar ace_type: the access control list (ACL) entry type. UserObj and - GroupObj denote the owning user and group, respectively. Possible values - include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' - :vartype ace_type: str or - ~azure.mgmt.datalake.analytics.catalog.models.AclType - :ivar principal_id: the Azure AD object ID of the user or group being - specified in the access control list (ACL) entry. - :vartype principal_id: str - :ivar permission: the permission type of the access control list (ACL) - entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter', - 'Write', 'All' - :vartype permission: str or - ~azure.mgmt.datalake.analytics.catalog.models.PermissionType - """ - - _validation = { - 'ace_type': {'readonly': True}, - 'principal_id': {'readonly': True}, - 'permission': {'readonly': True}, - } - - _attribute_map = { - 'ace_type': {'key': 'aceType', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(Acl, self).__init__(**kwargs) - self.ace_type = None - self.principal_id = None - self.permission = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py deleted file mode 100644 index c91fabb4ca29..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class AclCreateOrUpdateParameters(Model): - """The parameters used to create or update an access control list (ACL) entry. - - All required parameters must be populated in order to send to Azure. - - :param ace_type: Required. the access control list (ACL) entry type. - UserObj and GroupObj denote the owning user and group, respectively. - Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' - :type ace_type: str or - ~azure.mgmt.datalake.analytics.catalog.models.AclType - :param principal_id: Required. the Azure AD object ID of the user or group - being specified in the access control list (ACL) entry. - :type principal_id: str - :param permission: Required. the permission type of the access control - list (ACL) entry. 
Possible values include: 'None', 'Use', 'Create', - 'Drop', 'Alter', 'Write', 'All' - :type permission: str or - ~azure.mgmt.datalake.analytics.catalog.models.PermissionType - """ - - _validation = { - 'ace_type': {'required': True}, - 'principal_id': {'required': True}, - 'permission': {'required': True}, - } - - _attribute_map = { - 'ace_type': {'key': 'aceType', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AclCreateOrUpdateParameters, self).__init__(**kwargs) - self.ace_type = kwargs.get('ace_type', None) - self.principal_id = kwargs.get('principal_id', None) - self.permission = kwargs.get('permission', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py deleted file mode 100644 index a5abdbc2687e..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_create_or_update_parameters_py3.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class AclCreateOrUpdateParameters(Model): - """The parameters used to create or update an access control list (ACL) entry. - - All required parameters must be populated in order to send to Azure. - - :param ace_type: Required. the access control list (ACL) entry type. - UserObj and GroupObj denote the owning user and group, respectively. - Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' - :type ace_type: str or - ~azure.mgmt.datalake.analytics.catalog.models.AclType - :param principal_id: Required. the Azure AD object ID of the user or group - being specified in the access control list (ACL) entry. - :type principal_id: str - :param permission: Required. the permission type of the access control - list (ACL) entry. 
Possible values include: 'None', 'Use', 'Create', - 'Drop', 'Alter', 'Write', 'All' - :type permission: str or - ~azure.mgmt.datalake.analytics.catalog.models.PermissionType - """ - - _validation = { - 'ace_type': {'required': True}, - 'principal_id': {'required': True}, - 'permission': {'required': True}, - } - - _attribute_map = { - 'ace_type': {'key': 'aceType', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__(self, *, ace_type, principal_id: str, permission, **kwargs) -> None: - super(AclCreateOrUpdateParameters, self).__init__(**kwargs) - self.ace_type = ace_type - self.principal_id = principal_id - self.permission = permission diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py deleted file mode 100644 index fdfb3317dd09..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class AclDeleteParameters(Model): - """The parameters used to delete an access control list (ACL) entry. - - All required parameters must be populated in order to send to Azure. - - :param ace_type: Required. the access control list (ACL) entry type. - UserObj and GroupObj denote the owning user and group, respectively. - Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' - :type ace_type: str or - ~azure.mgmt.datalake.analytics.catalog.models.AclType - :param principal_id: Required. the Azure AD object ID of the user or group - being specified in the access control list (ACL) entry. - :type principal_id: str - """ - - _validation = { - 'ace_type': {'required': True}, - 'principal_id': {'required': True}, - } - - _attribute_map = { - 'ace_type': {'key': 'aceType', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(AclDeleteParameters, self).__init__(**kwargs) - self.ace_type = kwargs.get('ace_type', None) - self.principal_id = kwargs.get('principal_id', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py deleted file mode 100644 index 29cb4179acc2..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_delete_parameters_py3.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
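The Python 3 flavor of these ACL parameter models uses keyword-only constructors that map one field to one wire property. A usage sketch (the AAD object ID and account name are placeholders; this assumes the grant_acl catalog operation from this package and the client constructed in the earlier sketch):

.. code:: python

    from azure.mgmt.datalake.analytics.catalog.models import (
        AclCreateOrUpdateParameters,
    )

    # All three fields are required; the AAD object ID is a placeholder.
    acl_entry = AclCreateOrUpdateParameters(
        ace_type='User',
        principal_id='00000000-0000-0000-0000-000000000000',
        permission='Use')

    # 'client' as constructed in the earlier sketch.
    client.catalog.grant_acl('myadlaaccount', acl_entry)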
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class AclDeleteParameters(Model):
- """The parameters used to delete an access control list (ACL) entry.
-
- All required parameters must be populated in order to send to Azure.
-
- :param ace_type: Required. the access control list (ACL) entry type.
- UserObj and GroupObj denote the owning user and group, respectively.
- Possible values include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group'
- :type ace_type: str or
- ~azure.mgmt.datalake.analytics.catalog.models.AclType
- :param principal_id: Required. the Azure AD object ID of the user or group
- being specified in the access control list (ACL) entry.
- :type principal_id: str
- """
-
- _validation = {
- 'ace_type': {'required': True},
- 'principal_id': {'required': True},
- }
-
- _attribute_map = {
- 'ace_type': {'key': 'aceType', 'type': 'str'},
- 'principal_id': {'key': 'principalId', 'type': 'str'},
- }
-
- def __init__(self, *, ace_type, principal_id: str, **kwargs) -> None:
- super(AclDeleteParameters, self).__init__(**kwargs)
- self.ace_type = ace_type
- self.principal_id = principal_id
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_paged.py
deleted file mode 100644
index ce838fae2933..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_paged.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.paging import Paged
-
-
-class AclPaged(Paged):
- """
- A paging container for iterating over a list of :class:`Acl <azure.mgmt.datalake.analytics.catalog.models.Acl>` objects
- """
-
- _attribute_map = {
- 'next_link': {'key': 'nextLink', 'type': 'str'},
- 'current_page': {'key': 'value', 'type': '[Acl]'}
- }
-
- def __init__(self, *args, **kwargs):
-
- super(AclPaged, self).__init__(*args, **kwargs)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py
deleted file mode 100644
index 3d98a56102a2..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/acl_py3.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
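The Acl model that follows is read-only: all three properties are populated by the server and ignored on requests. Reading entries back is again a paged iteration (account name hypothetical; assumes the list_acls catalog operation from this package):

.. code:: python

    # 'client' as constructed in the earlier sketch; each iteration
    # yields a server-populated, read-only Acl model.
    for acl in client.catalog.list_acls('myadlaaccount'):
        print(acl.ace_type, acl.principal_id, acl.permission)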
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Acl(Model): - """A Data Lake Analytics catalog access control list (ACL) entry. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar ace_type: the access control list (ACL) entry type. UserObj and - GroupObj denote the owning user and group, respectively. Possible values - include: 'UserObj', 'GroupObj', 'Other', 'User', 'Group' - :vartype ace_type: str or - ~azure.mgmt.datalake.analytics.catalog.models.AclType - :ivar principal_id: the Azure AD object ID of the user or group being - specified in the access control list (ACL) entry. - :vartype principal_id: str - :ivar permission: the permission type of the access control list (ACL) - entry. Possible values include: 'None', 'Use', 'Create', 'Drop', 'Alter', - 'Write', 'All' - :vartype permission: str or - ~azure.mgmt.datalake.analytics.catalog.models.PermissionType - """ - - _validation = { - 'ace_type': {'readonly': True}, - 'principal_id': {'readonly': True}, - 'permission': {'readonly': True}, - } - - _attribute_map = { - 'ace_type': {'key': 'aceType', 'type': 'str'}, - 'principal_id': {'key': 'principalId', 'type': 'str'}, - 'permission': {'key': 'permission', 'type': 'str'}, - } - - def __init__(self, **kwargs) -> None: - super(Acl, self).__init__(**kwargs) - self.ace_type = None - self.principal_id = None - self.permission = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py deleted file mode 100644 index 741b35995ad2..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class CatalogItem(Model): - """A Data Lake Analytics catalog item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. 
- :type version: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CatalogItem, self).__init__(**kwargs) - self.compute_account_name = kwargs.get('compute_account_name', None) - self.version = kwargs.get('version', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py deleted file mode 100644 index 9ad56393d90b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class CatalogItemList(Model): - """A Data Lake Analytics catalog item list. - - :param next_link: the link to the next page of results. - :type next_link: str - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CatalogItemList, self).__init__(**kwargs) - self.next_link = kwargs.get('next_link', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py deleted file mode 100644 index 8a25d8e32de3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_list_py3.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class CatalogItemList(Model): - """A Data Lake Analytics catalog item list. - - :param next_link: the link to the next page of results. 
- :type next_link: str
- """
-
- _attribute_map = {
- 'next_link': {'key': 'nextLink', 'type': 'str'},
- }
-
- def __init__(self, *, next_link: str=None, **kwargs) -> None:
- super(CatalogItemList, self).__init__(**kwargs)
- self.next_link = next_link
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py
deleted file mode 100644
index aec21a6408ee..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/catalog_item_py3.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class CatalogItem(Model):
- """A Data Lake Analytics catalog item.
-
- :param compute_account_name: the name of the Data Lake Analytics account.
- :type compute_account_name: str
- :param version: the version of the catalog item.
- :type version: str
- """
-
- _attribute_map = {
- 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
- 'version': {'key': 'version', 'type': 'str'},
- }
-
- def __init__(self, *, compute_account_name: str=None, version: str=None, **kwargs) -> None:
- super(CatalogItem, self).__init__(**kwargs)
- self.compute_account_name = compute_account_name
- self.version = version
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py
deleted file mode 100644
index ff1312b7e813..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
- """Data Lake Analytics catalog credential creation parameters.
-
- All required parameters must be populated in order to send to Azure.
-
- :param password: Required. the password for the credential and user with
- access to the data source.
- :type password: str
- :param uri: Required. the URI identifier for the data source this
- credential can connect to in the format <hostname>:<port>
- :type uri: str
- :param user_id: Required. the object identifier for the user associated
- with this credential with access to the data source.
- :type user_id: str
- """
-
- _validation = {
- 'password': {'required': True},
- 'uri': {'required': True},
- 'user_id': {'required': True},
- }
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'user_id': {'key': 'userId', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs)
- self.password = kwargs.get('password', None)
- self.uri = kwargs.get('uri', None)
- self.user_id = kwargs.get('user_id', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py
deleted file mode 100644
index 15ccd484953b..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_create_parameters_py3.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogCredentialCreateParameters(Model):
- """Data Lake Analytics catalog credential creation parameters.
-
- All required parameters must be populated in order to send to Azure.
-
- :param password: Required. the password for the credential and user with
- access to the data source.
- :type password: str
- :param uri: Required. the URI identifier for the data source this
- credential can connect to in the format <hostname>:<port>
- :type uri: str
- :param user_id: Required. the object identifier for the user associated
- with this credential with access to the data source.
- :type user_id: str
- """
-
- _validation = {
- 'password': {'required': True},
- 'uri': {'required': True},
- 'user_id': {'required': True},
- }
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'user_id': {'key': 'userId', 'type': 'str'},
- }
-
- def __init__(self, *, password: str, uri: str, user_id: str, **kwargs) -> None:
- super(DataLakeAnalyticsCatalogCredentialCreateParameters, self).__init__(**kwargs)
- self.password = password
- self.uri = uri
- self.user_id = user_id
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py
deleted file mode 100644
index 748060c8e4e4..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model): - """Data Lake Analytics catalog credential deletion parameters. - - :param password: the current password for the credential and user with - access to the data source. This is required if the requester is not the - account owner. - :type password: str - """ - - _attribute_map = { - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs) - self.password = kwargs.get('password', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py deleted file mode 100644 index 0e1e99f00659..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_delete_parameters_py3.py +++ /dev/null @@ -1,30 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class DataLakeAnalyticsCatalogCredentialDeleteParameters(Model): - """Data Lake Analytics catalog credential deletion parameters. - - :param password: the current password for the credential and user with - access to the data source. This is required if the requester is not the - account owner. - :type password: str - """ - - _attribute_map = { - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__(self, *, password: str=None, **kwargs) -> None: - super(DataLakeAnalyticsCatalogCredentialDeleteParameters, self).__init__(**kwargs) - self.password = password diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py deleted file mode 100644 index 1301d96a77fd..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
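Credential deletion only needs the current password, and only when the caller is not the account owner. A sketch (all names are placeholders; assumes the delete_credential catalog operation from this package):

.. code:: python

    from azure.mgmt.datalake.analytics.catalog.models import (
        DataLakeAnalyticsCatalogCredentialDeleteParameters,
    )

    # The password is only needed when the requester is not the
    # account owner.
    params = DataLakeAnalyticsCatalogCredentialDeleteParameters(
        password='current-password')

    # 'client' as constructed in the earlier sketch.
    client.catalog.delete_credential(
        'myadlaaccount', 'mydb', 'mycredential', parameters=params)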
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model):
- """Data Lake Analytics catalog credential update parameters.
-
- :param password: the current password for the credential and user with
- access to the data source. This is required if the requester is not the
- account owner.
- :type password: str
- :param new_password: the new password for the credential and user with
- access to the data source.
- :type new_password: str
- :param uri: the URI identifier for the data source this credential can
- connect to in the format <hostname>:<port>
- :type uri: str
- :param user_id: the object identifier for the user associated with this
- credential with access to the data source.
- :type user_id: str
- """
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'new_password': {'key': 'newPassword', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'user_id': {'key': 'userId', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs)
- self.password = kwargs.get('password', None)
- self.new_password = kwargs.get('new_password', None)
- self.uri = kwargs.get('uri', None)
- self.user_id = kwargs.get('user_id', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py
deleted file mode 100644
index c348936b53a9..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_credential_update_parameters_py3.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogCredentialUpdateParameters(Model):
- """Data Lake Analytics catalog credential update parameters.
-
- :param password: the current password for the credential and user with
- access to the data source. This is required if the requester is not the
- account owner.
- :type password: str
- :param new_password: the new password for the credential and user with
- access to the data source.
- :type new_password: str
- :param uri: the URI identifier for the data source this credential can
- connect to in the format <hostname>:<port>
- :type uri: str
- :param user_id: the object identifier for the user associated with this
- credential with access to the data source.
- :type user_id: str
- """
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'new_password': {'key': 'newPassword', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'user_id': {'key': 'userId', 'type': 'str'},
- }
-
- def __init__(self, *, password: str=None, new_password: str=None, uri: str=None, user_id: str=None, **kwargs) -> None:
- super(DataLakeAnalyticsCatalogCredentialUpdateParameters, self).__init__(**kwargs)
- self.password = password
- self.new_password = new_password
- self.uri = uri
- self.user_id = user_id
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py
deleted file mode 100644
index 8a9cd7a8cbdb..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model):
- """Data Lake Analytics catalog secret creation and update parameters. This is
- deprecated and will be removed in the next release. Please use
- DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead.
-
- All required parameters must be populated in order to send to Azure.
-
- :param password: Required. the password for the secret to pass in
- :type password: str
- :param uri: the URI identifier for the secret in the format
- <hostname>:<port>
- :type uri: str
- """
-
- _validation = {
- 'password': {'required': True},
- }
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs)
- self.password = kwargs.get('password', None)
- self.uri = kwargs.get('uri', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py
deleted file mode 100644
index 95aa1aea4d92..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/data_lake_analytics_catalog_secret_create_or_update_parameters_py3.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
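Password rotation goes through the update model above; the secret model that follows is deprecated and kept only for backward compatibility. A sketch (all values are placeholders; assumes the update_credential catalog operation from this package):

.. code:: python

    from azure.mgmt.datalake.analytics.catalog.models import (
        DataLakeAnalyticsCatalogCredentialUpdateParameters,
    )

    update = DataLakeAnalyticsCatalogCredentialUpdateParameters(
        password='old-password',      # required if the caller is not the account owner
        new_password='new-password',
        uri='myserver:1433',          # <hostname>:<port> of the data source
        user_id='someuser')

    # 'client' as constructed in the earlier sketch.
    client.catalog.update_credential(
        'myadlaaccount', 'mydb', 'mycredential', update)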
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters(Model):
- """Data Lake Analytics catalog secret creation and update parameters. This is
- deprecated and will be removed in the next release. Please use
- DataLakeAnalyticsCatalogCredentialCreateOrUpdateParameters instead.
-
- All required parameters must be populated in order to send to Azure.
-
- :param password: Required. the password for the secret to pass in
- :type password: str
- :param uri: the URI identifier for the secret in the format
- <hostname>:<port>
- :type uri: str
- """
-
- _validation = {
- 'password': {'required': True},
- }
-
- _attribute_map = {
- 'password': {'key': 'password', 'type': 'str'},
- 'uri': {'key': 'uri', 'type': 'str'},
- }
-
- def __init__(self, *, password: str, uri: str=None, **kwargs) -> None:
- super(DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters, self).__init__(**kwargs)
- self.password = password
- self.uri = uri
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py
deleted file mode 100644
index f292532ecd32..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DdlName(Model):
- """A Data Lake Analytics DDL name item.
-
- :param first_part: the first part of the DDL name; for a table this is
- the database name.
- :type first_part: str
- :param second_part: the second part of the DDL name; for a table this is
- the schema name.
- :type second_part: str
- :param third_part: the third part of the DDL name; for a table this is
- the table name.
- :type third_part: str
- :param server: the server qualifier for this DDL name.
- :type server: str
- """
-
- _attribute_map = {
- 'first_part': {'key': 'firstPart', 'type': 'str'},
- 'second_part': {'key': 'secondPart', 'type': 'str'},
- 'third_part': {'key': 'thirdPart', 'type': 'str'},
- 'server': {'key': 'server', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(DdlName, self).__init__(**kwargs)
- self.first_part = kwargs.get('first_part', None)
- self.second_part = kwargs.get('second_part', None)
- self.third_part = kwargs.get('third_part', None)
- self.server = kwargs.get('server', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py
deleted file mode 100644
index cde8d3153014..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/ddl_name_py3.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class DdlName(Model):
- """A Data Lake Analytics DDL name item.
-
- :param first_part: the first part of the DDL name; for a table this is
- the database name.
- :type first_part: str
- :param second_part: the second part of the DDL name; for a table this is
- the schema name.
- :type second_part: str
- :param third_part: the third part of the DDL name; for a table this is
- the table name.
- :type third_part: str
- :param server: the server qualifier for this DDL name.
- :type server: str
- """
-
- _attribute_map = {
- 'first_part': {'key': 'firstPart', 'type': 'str'},
- 'second_part': {'key': 'secondPart', 'type': 'str'},
- 'third_part': {'key': 'thirdPart', 'type': 'str'},
- 'server': {'key': 'server', 'type': 'str'},
- }
-
- def __init__(self, *, first_part: str=None, second_part: str=None, third_part: str=None, server: str=None, **kwargs) -> None:
- super(DdlName, self).__init__(**kwargs)
- self.first_part = first_part
- self.second_part = second_part
- self.third_part = third_part
- self.server = server
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py
deleted file mode 100644
index ead952acc662..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class EntityId(Model):
- """A Data Lake Analytics catalog entity identifier object.
-
- :param name: the multi-part name of the entity.
- :type name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName
- :param version: the version of the external data source.
- :type version: str
- """
-
- _attribute_map = {
- 'name': {'key': 'name', 'type': 'DdlName'},
- 'version': {'key': 'version', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(EntityId, self).__init__(**kwargs)
- self.name = kwargs.get('name', None)
- self.version = kwargs.get('version', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py
deleted file mode 100644
index 837237ccbed6..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/entity_id_py3.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class EntityId(Model):
- """A Data Lake Analytics catalog entity identifier object.
-
- :param name: the multi-part name of the entity.
- :type name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName
- :param version: the version of the external data source.
- :type version: str
- """
-
- _attribute_map = {
- 'name': {'key': 'name', 'type': 'DdlName'},
- 'version': {'key': 'version', 'type': 'str'},
- }
-
- def __init__(self, *, name=None, version: str=None, **kwargs) -> None:
- super(EntityId, self).__init__(**kwargs)
- self.name = name
- self.version = version
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py
deleted file mode 100644
index f320fe801e9c..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class ExternalTable(Model):
- """A Data Lake Analytics catalog external table item.
-
- :param table_name: the name of the table associated with this database and
- schema.
- :type table_name: str
- :param data_source: the data source associated with this external table.
- :type data_source: ~azure.mgmt.datalake.analytics.catalog.models.EntityId - """ - - _attribute_map = { - 'table_name': {'key': 'tableName', 'type': 'str'}, - 'data_source': {'key': 'dataSource', 'type': 'EntityId'}, - } - - def __init__(self, **kwargs): - super(ExternalTable, self).__init__(**kwargs) - self.table_name = kwargs.get('table_name', None) - self.data_source = kwargs.get('data_source', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py deleted file mode 100644 index c0ce15c2c40f..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/external_table_py3.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ExternalTable(Model): - """A Data Lake Analytics catalog external table item. - - :param table_name: the name of the table associated with this database and - schema. - :type table_name: str - :param data_source: the data source associated with this external table. - :type data_source: ~azure.mgmt.datalake.analytics.catalog.models.EntityId - """ - - _attribute_map = { - 'table_name': {'key': 'tableName', 'type': 'str'}, - 'data_source': {'key': 'dataSource', 'type': 'EntityId'}, - } - - def __init__(self, *, table_name: str=None, data_source=None, **kwargs) -> None: - super(ExternalTable, self).__init__(**kwargs) - self.table_name = table_name - self.data_source = data_source diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py deleted file mode 100644 index 3f19e903f478..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TypeFieldInfo(Model): - """A Data Lake Analytics catalog type field information item. - - :param name: the name of the field associated with this type. - :type name: str - :param type: the type of the field. 
- :type type: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(TypeFieldInfo, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py deleted file mode 100644 index 629d08f65304..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/type_field_info_py3.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class TypeFieldInfo(Model): - """A Data Lake Analytics catalog type field information item. - - :param name: the name of the field associated with this type. - :type name: str - :param type: the type of the field. - :type type: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, type: str=None, **kwargs) -> None: - super(TypeFieldInfo, self).__init__(**kwargs) - self.name = name - self.type = type diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py deleted file mode 100644 index c3adfeab895c..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlAssembly(CatalogItem): - """A Data Lake Analytics catalog U-SQL Assembly. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the assembly. - :type name: str - :param clr_name: the name of the CLR. - :type clr_name: str - :param is_visible: the switch indicating if this assembly is visible or - not. - :type is_visible: bool - :param is_user_defined: the switch indicating if this assembly is user - defined or not. 
- :type is_user_defined: bool - :param files: the list of files associated with the assembly - :type files: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo] - :param dependencies: the list of dependencies associated with the assembly - :type dependencies: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo] - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'assemblyName', 'type': 'str'}, - 'clr_name': {'key': 'clrName', 'type': 'str'}, - 'is_visible': {'key': 'isVisible', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'}, - 'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'}, - } - - def __init__(self, **kwargs): - super(USqlAssembly, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.name = kwargs.get('name', None) - self.clr_name = kwargs.get('clr_name', None) - self.is_visible = kwargs.get('is_visible', None) - self.is_user_defined = kwargs.get('is_user_defined', None) - self.files = kwargs.get('files', None) - self.dependencies = kwargs.get('dependencies', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py deleted file mode 100644 index 23412eb5f4e5..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlAssemblyClr(CatalogItem): - """A Data Lake Analytics catalog U-SQL assembly CLR item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the assembly. - :type name: str - :param clr_name: the name of the CLR. 
- :type clr_name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'assemblyClrName', 'type': 'str'}, - 'clr_name': {'key': 'clrName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlAssemblyClr, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.name = kwargs.get('name', None) - self.clr_name = kwargs.get('clr_name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_paged.py deleted file mode 100644 index caae702d9f0d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlAssemblyClrPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlAssemblyClr ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlAssemblyClr]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlAssemblyClrPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py deleted file mode 100644 index 50579d120536..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlAssemblyClr(CatalogItem): - """A Data Lake Analytics catalog U-SQL assembly CLR item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the assembly. - :type name: str - :param clr_name: the name of the CLR. 
- :type clr_name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'assemblyClrName', 'type': 'str'}, - 'clr_name': {'key': 'clrName', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, **kwargs) -> None: - super(USqlAssemblyClr, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.name = name - self.clr_name = clr_name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py deleted file mode 100644 index b9995d9f20d0..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlAssemblyDependencyInfo(Model): - """A Data Lake Analytics catalog U-SQL dependency information item. - - :param entity_id: the EntityId of the dependency. - :type entity_id: ~azure.mgmt.datalake.analytics.catalog.models.EntityId - """ - - _attribute_map = { - 'entity_id': {'key': 'entityId', 'type': 'EntityId'}, - } - - def __init__(self, **kwargs): - super(USqlAssemblyDependencyInfo, self).__init__(**kwargs) - self.entity_id = kwargs.get('entity_id', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info_py3.py deleted file mode 100644 index fa03a9d16955..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_dependency_info_py3.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlAssemblyDependencyInfo(Model): - """A Data Lake Analytics catalog U-SQL dependency information item. - - :param entity_id: the EntityId of the dependency. 
-    :type entity_id: ~azure.mgmt.datalake.analytics.catalog.models.EntityId
-    """
-
-    _attribute_map = {
-        'entity_id': {'key': 'entityId', 'type': 'EntityId'},
-    }
-
-    def __init__(self, *, entity_id=None, **kwargs) -> None:
-        super(USqlAssemblyDependencyInfo, self).__init__(**kwargs)
-        self.entity_id = entity_id
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py
deleted file mode 100644
index 19d3135999b3..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlAssemblyFileInfo(Model):
-    """A Data Lake Analytics catalog U-SQL assembly file information item.
-
-    :param type: the assembly file type. Possible values include: 'Assembly',
-     'Resource', 'Nodeploy'
-    :type type: str or ~azure.mgmt.datalake.analytics.catalog.models.FileType
-    :param original_path: the original path to the assembly file.
-    :type original_path: str
-    :param content_path: the content path to the assembly file.
-    :type content_path: str
-    """
-
-    _attribute_map = {
-        'type': {'key': 'type', 'type': 'str'},
-        'original_path': {'key': 'originalPath', 'type': 'str'},
-        'content_path': {'key': 'contentPath', 'type': 'str'},
-    }
-
-    def __init__(self, **kwargs):
-        super(USqlAssemblyFileInfo, self).__init__(**kwargs)
-        self.type = kwargs.get('type', None)
-        self.original_path = kwargs.get('original_path', None)
-        self.content_path = kwargs.get('content_path', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py
deleted file mode 100644
index 3d6889c04be9..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_file_info_py3.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlAssemblyFileInfo(Model):
-    """A Data Lake Analytics catalog U-SQL assembly file information item.
-
-    :param type: the assembly file type. Possible values include: 'Assembly',
-     'Resource', 'Nodeploy'
-    :type type: str or ~azure.mgmt.datalake.analytics.catalog.models.FileType
-    :param original_path: the original path to the assembly file.
-    :type original_path: str
-    :param content_path: the content path to the assembly file.
-    :type content_path: str
-    """
-
-    _attribute_map = {
-        'type': {'key': 'type', 'type': 'str'},
-        'original_path': {'key': 'originalPath', 'type': 'str'},
-        'content_path': {'key': 'contentPath', 'type': 'str'},
-    }
-
-    def __init__(self, *, type=None, original_path: str=None, content_path: str=None, **kwargs) -> None:
-        super(USqlAssemblyFileInfo, self).__init__(**kwargs)
-        self.type = type
-        self.original_path = original_path
-        self.content_path = content_path
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py
deleted file mode 100644
index 4ebf178e728b..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_py3.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from .catalog_item_py3 import CatalogItem
-
-
-class USqlAssembly(CatalogItem):
-    """A Data Lake Analytics catalog U-SQL Assembly.
-
-    :param compute_account_name: the name of the Data Lake Analytics account.
-    :type compute_account_name: str
-    :param version: the version of the catalog item.
-    :type version: str
-    :param database_name: the name of the database.
-    :type database_name: str
-    :param name: the name of the assembly.
-    :type name: str
-    :param clr_name: the name of the CLR.
-    :type clr_name: str
-    :param is_visible: the switch indicating if this assembly is visible or
-     not.
-    :type is_visible: bool
-    :param is_user_defined: the switch indicating if this assembly is user
-     defined or not.
- :type is_user_defined: bool - :param files: the list of files associated with the assembly - :type files: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyFileInfo] - :param dependencies: the list of dependencies associated with the assembly - :type dependencies: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyDependencyInfo] - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'assemblyName', 'type': 'str'}, - 'clr_name': {'key': 'clrName', 'type': 'str'}, - 'is_visible': {'key': 'isVisible', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'files': {'key': 'files', 'type': '[USqlAssemblyFileInfo]'}, - 'dependencies': {'key': 'dependencies', 'type': '[USqlAssemblyDependencyInfo]'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, is_visible: bool=None, is_user_defined: bool=None, files=None, dependencies=None, **kwargs) -> None: - super(USqlAssembly, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.name = name - self.clr_name = clr_name - self.is_visible = is_visible - self.is_user_defined = is_user_defined - self.files = files - self.dependencies = dependencies diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py deleted file mode 100644 index 5596e8b8aa86..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlCredential(CatalogItem): - """A Data Lake Analytics catalog U-SQL credential item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param name: the name of the credential. 
- :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'name': {'key': 'credentialName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlCredential, self).__init__(**kwargs) - self.name = kwargs.get('name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_paged.py deleted file mode 100644 index fd7b50a9ae63..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlCredentialPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlCredential ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlCredential]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlCredentialPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py deleted file mode 100644 index f1654bc03774..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_credential_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlCredential(CatalogItem): - """A Data Lake Analytics catalog U-SQL credential item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param name: the name of the credential. 
- :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'name': {'key': 'credentialName', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: - super(USqlCredential, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.name = name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py deleted file mode 100644 index 93fedcb63ae6..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlDatabase(CatalogItem): - """A Data Lake Analytics catalog U-SQL database item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param name: the name of the database. - :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'name': {'key': 'databaseName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlDatabase, self).__init__(**kwargs) - self.name = kwargs.get('name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py deleted file mode 100644 index acebb8500686..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
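The ``*_paged.py`` modules being deleted in this hunk (``USqlCredentialPaged`` above, ``USqlDatabasePaged`` here) all follow the same msrest pattern: the ``Paged`` base class reads the current page out of the ``value`` key and transparently follows ``nextLink`` until the listing is exhausted, so callers consume list operations as plain iterators. A minimal sketch, assuming an already-authenticated ``DataLakeAnalyticsCatalogManagementClient`` named ``client`` and a hypothetical account name:

.. code:: python

    # list_databases returns a USqlDatabasePaged; iterating may issue
    # further GETs against the stored nextLink, and each 'value' entry is
    # deserialized into a USqlDatabase via its _attribute_map.
    for db in client.catalog.list_databases('myadlaaccount'):
        print(db.name, db.version)
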
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlDatabasePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlDatabase ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlDatabase]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlDatabasePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py deleted file mode 100644 index 00cfffdf5ccd..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_database_py3.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlDatabase(CatalogItem): - """A Data Lake Analytics catalog U-SQL database item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param name: the name of the database. - :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'name': {'key': 'databaseName', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, name: str=None, **kwargs) -> None: - super(USqlDatabase, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.name = name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py deleted file mode 100644 index 5d413c63b7d8..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlDirectedColumn(Model): - """A Data Lake Analytics catalog U-SQL directed column item. - - :param name: the name of the index in the table. - :type name: str - :param descending: the switch indicating if the index is descending or - not. 
- :type descending: bool - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'descending': {'key': 'descending', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(USqlDirectedColumn, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.descending = kwargs.get('descending', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py deleted file mode 100644 index c699a39c407b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_directed_column_py3.py +++ /dev/null @@ -1,33 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlDirectedColumn(Model): - """A Data Lake Analytics catalog U-SQL directed column item. - - :param name: the name of the index in the table. - :type name: str - :param descending: the switch indicating if the index is descending or - not. - :type descending: bool - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'descending': {'key': 'descending', 'type': 'bool'}, - } - - def __init__(self, *, name: str=None, descending: bool=None, **kwargs) -> None: - super(USqlDirectedColumn, self).__init__(**kwargs) - self.name = name - self.descending = descending diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py deleted file mode 100644 index c5f27b3ade7d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlDistributionInfo(Model): - """A Data Lake Analytics catalog U-SQL distribution information object. - - :param type: the type of this distribution. - :type type: int - :param keys: the list of directed columns in the distribution - :type keys: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] - :param count: the count of indices using this distribution. - :type count: int - :param dynamic_count: the dynamic count of indices using this - distribution. 
- :type dynamic_count: int - """ - - _attribute_map = { - 'type': {'key': 'type', 'type': 'int'}, - 'keys': {'key': 'keys', 'type': '[USqlDirectedColumn]'}, - 'count': {'key': 'count', 'type': 'int'}, - 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(USqlDistributionInfo, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.keys = kwargs.get('keys', None) - self.count = kwargs.get('count', None) - self.dynamic_count = kwargs.get('dynamic_count', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py deleted file mode 100644 index ba87c79220fa..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_distribution_info_py3.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlDistributionInfo(Model): - """A Data Lake Analytics catalog U-SQL distribution information object. - - :param type: the type of this distribution. - :type type: int - :param keys: the list of directed columns in the distribution - :type keys: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn] - :param count: the count of indices using this distribution. - :type count: int - :param dynamic_count: the dynamic count of indices using this - distribution. - :type dynamic_count: int - """ - - _attribute_map = { - 'type': {'key': 'type', 'type': 'int'}, - 'keys': {'key': 'keys', 'type': '[USqlDirectedColumn]'}, - 'count': {'key': 'count', 'type': 'int'}, - 'dynamic_count': {'key': 'dynamicCount', 'type': 'int'}, - } - - def __init__(self, *, type: int=None, keys=None, count: int=None, dynamic_count: int=None, **kwargs) -> None: - super(USqlDistributionInfo, self).__init__(**kwargs) - self.type = type - self.keys = keys - self.count = count - self.dynamic_count = dynamic_count diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py deleted file mode 100644 index 390b2359c172..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
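As with every model in this package, ``USqlDistributionInfo`` is generated as a py2/py3 pair: the ``_py3`` variant adds keyword-only, type-annotated parameters, while the plain variant pulls the same names out of ``**kwargs``, and the models package imports the ``_py3`` module on Python 3 and falls back to the other one on Python 2. Calling code is therefore identical under either interpreter; a minimal sketch with illustrative values:

.. code:: python

    from azure.mgmt.datalake.analytics.catalog.models import USqlDistributionInfo

    # The same keyword arguments work under both generated variants.
    dist = USqlDistributionInfo(type=2, count=10, dynamic_count=0)
    print(dist.type, dist.count, dist.dynamic_count)
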
-# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlExternalDataSource(CatalogItem): - """A Data Lake Analytics catalog U-SQL external datasource item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the external data source. - :type name: str - :param provider: the name of the provider for the external data source. - :type provider: str - :param provider_string: the name of the provider string for the external - data source. - :type provider_string: str - :param pushdown_types: the list of types to push down from the external - data source. - :type pushdown_types: list[str] - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'externalDataSourceName', 'type': 'str'}, - 'provider': {'key': 'provider', 'type': 'str'}, - 'provider_string': {'key': 'providerString', 'type': 'str'}, - 'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(USqlExternalDataSource, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.name = kwargs.get('name', None) - self.provider = kwargs.get('provider', None) - self.provider_string = kwargs.get('provider_string', None) - self.pushdown_types = kwargs.get('pushdown_types', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_paged.py deleted file mode 100644 index fc5cf4ae6b1d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
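The ``_attribute_map`` in each of these generated classes is what drives (de)serialization: msrest uses it to translate between Python attribute names and wire-format keys, e.g. ``provider_string`` versus ``providerString`` on the ``USqlExternalDataSource`` removed above. A round-trip sketch with illustrative values, using the generic ``serialize``/``deserialize`` helpers that msrest's ``Model`` base class provides:

.. code:: python

    from azure.mgmt.datalake.analytics.catalog.models import USqlExternalDataSource

    wire = {
        'databaseName': 'master',
        'externalDataSourceName': 'mysource',
        'provider': 'SQLSERVER',
    }
    source = USqlExternalDataSource.deserialize(wire)
    print(source.name)         # 'mysource', mapped from externalDataSourceName
    print(source.serialize())  # back to wire-format keys
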
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlExternalDataSourcePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlExternalDataSource ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlExternalDataSource]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlExternalDataSourcePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py deleted file mode 100644 index 946ed8479b4d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_external_data_source_py3.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlExternalDataSource(CatalogItem): - """A Data Lake Analytics catalog U-SQL external datasource item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the external data source. - :type name: str - :param provider: the name of the provider for the external data source. - :type provider: str - :param provider_string: the name of the provider string for the external - data source. - :type provider_string: str - :param pushdown_types: the list of types to push down from the external - data source. 
-    :type pushdown_types: list[str]
-    """
-
-    _attribute_map = {
-        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
-        'version': {'key': 'version', 'type': 'str'},
-        'database_name': {'key': 'databaseName', 'type': 'str'},
-        'name': {'key': 'externalDataSourceName', 'type': 'str'},
-        'provider': {'key': 'provider', 'type': 'str'},
-        'provider_string': {'key': 'providerString', 'type': 'str'},
-        'pushdown_types': {'key': 'pushdownTypes', 'type': '[str]'},
-    }
-
-    def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, provider: str=None, provider_string: str=None, pushdown_types=None, **kwargs) -> None:
-        super(USqlExternalDataSource, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
-        self.database_name = database_name
-        self.name = name
-        self.provider = provider
-        self.provider_string = provider_string
-        self.pushdown_types = pushdown_types
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py
deleted file mode 100644
index bdb47ae18d97..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlIndex(Model):
-    """A Data Lake Analytics catalog U-SQL table index item.
-
-    :param name: the name of the index in the table.
-    :type name: str
-    :param index_keys: the list of directed columns in the index
-    :type index_keys:
-     list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn]
-    :param columns: the list of columns in the index
-    :type columns: list[str]
-    :param distribution_info: the distribution info of the index
-    :type distribution_info:
-     ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo
-    :param partition_function: partition function ID for the index.
-    :type partition_function: str
-    :param partition_key_list: the list of partition keys in the index
-    :type partition_key_list: list[str]
-    :param stream_names: the list of full paths to the streams that contain
-     this index in the DataLake account.
-    :type stream_names: list[str]
-    :param is_columnstore: the switch indicating if this index is a
-     columnstore index.
-    :type is_columnstore: bool
-    :param index_id: the ID of this index within the table.
-    :type index_id: int
-    :param is_unique: the switch indicating if this index is a unique index.
-    :type is_unique: bool
-    """
-
-    _attribute_map = {
-        'name': {'key': 'name', 'type': 'str'},
-        'index_keys': {'key': 'indexKeys', 'type': '[USqlDirectedColumn]'},
-        'columns': {'key': 'columns', 'type': '[str]'},
-        'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'},
-        'partition_function': {'key': 'partitionFunction', 'type': 'str'},
-        'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'},
-        'stream_names': {'key': 'streamNames', 'type': '[str]'},
-        'is_columnstore': {'key': 'isColumnstore', 'type': 'bool'},
-        'index_id': {'key': 'indexId', 'type': 'int'},
-        'is_unique': {'key': 'isUnique', 'type': 'bool'},
-    }
-
-    def __init__(self, **kwargs):
-        super(USqlIndex, self).__init__(**kwargs)
-        self.name = kwargs.get('name', None)
-        self.index_keys = kwargs.get('index_keys', None)
-        self.columns = kwargs.get('columns', None)
-        self.distribution_info = kwargs.get('distribution_info', None)
-        self.partition_function = kwargs.get('partition_function', None)
-        self.partition_key_list = kwargs.get('partition_key_list', None)
-        self.stream_names = kwargs.get('stream_names', None)
-        self.is_columnstore = kwargs.get('is_columnstore', None)
-        self.index_id = kwargs.get('index_id', None)
-        self.is_unique = kwargs.get('is_unique', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py
deleted file mode 100644
index 899591442bb1..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_index_py3.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlIndex(Model):
-    """A Data Lake Analytics catalog U-SQL table index item.
-
-    :param name: the name of the index in the table.
-    :type name: str
-    :param index_keys: the list of directed columns in the index
-    :type index_keys:
-     list[~azure.mgmt.datalake.analytics.catalog.models.USqlDirectedColumn]
-    :param columns: the list of columns in the index
-    :type columns: list[str]
-    :param distribution_info: the distribution info of the index
-    :type distribution_info:
-     ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo
-    :param partition_function: partition function ID for the index.
-    :type partition_function: str
-    :param partition_key_list: the list of partition keys in the index
-    :type partition_key_list: list[str]
-    :param stream_names: the list of full paths to the streams that contain
-     this index in the DataLake account.
-    :type stream_names: list[str]
-    :param is_columnstore: the switch indicating if this index is a
-     columnstore index.
-    :type is_columnstore: bool
-    :param index_id: the ID of this index within the table.
-    :type index_id: int
-    :param is_unique: the switch indicating if this index is a unique index.
- :type is_unique: bool - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'index_keys': {'key': 'indexKeys', 'type': '[USqlDirectedColumn]'}, - 'columns': {'key': 'columns', 'type': '[str]'}, - 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, - 'partition_function': {'key': 'partitionFunction', 'type': 'str'}, - 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, - 'stream_names': {'key': 'streamNames', 'type': '[str]'}, - 'is_columnstore': {'key': 'isColumnstore', 'type': 'bool'}, - 'index_id': {'key': 'indexId', 'type': 'int'}, - 'is_unique': {'key': 'isUnique', 'type': 'bool'}, - } - - def __init__(self, *, name: str=None, index_keys=None, columns=None, distribution_info=None, partition_function: str=None, partition_key_list=None, stream_names=None, is_columnstore: bool=None, index_id: int=None, is_unique: bool=None, **kwargs) -> None: - super(USqlIndex, self).__init__(**kwargs) - self.name = name - self.index_keys = index_keys - self.columns = columns - self.distribution_info = distribution_info - self.partition_function = partition_function - self.partition_key_list = partition_key_list - self.stream_names = stream_names - self.is_columnstore = is_columnstore - self.index_id = index_id - self.is_unique = is_unique diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py deleted file mode 100644 index bbb6ed25858e..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlPackage(CatalogItem): - """A Data Lake Analytics catalog U-SQL package item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database containing the package. - :type database_name: str - :param schema_name: the name of the schema associated with this package - and database. - :type schema_name: str - :param name: the name of the package. - :type name: str - :param definition: the definition of the package. 
- :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'packageName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlPackage, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.definition = kwargs.get('definition', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_paged.py deleted file mode 100644 index 2d8c3977a4f2..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlPackagePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlPackage ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlPackage]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlPackagePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py deleted file mode 100644 index e1f79d784766..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_package_py3.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlPackage(CatalogItem): - """A Data Lake Analytics catalog U-SQL package item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database containing the package. - :type database_name: str - :param schema_name: the name of the schema associated with this package - and database. - :type schema_name: str - :param name: the name of the package. 
- :type name: str - :param definition: the definition of the package. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'packageName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: - super(USqlPackage, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py deleted file mode 100644 index 92874b2d9bda..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlProcedure(CatalogItem): - """A Data Lake Analytics catalog U-SQL procedure item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this procedure - and database. - :type schema_name: str - :param name: the name of the procedure. - :type name: str - :param definition: the defined query of the procedure. 
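The `_py3` modules being deleted here differ from their Python 2 twins only in the constructor: keyword-only, annotated parameters instead of `**kwargs` lookups, with identical wire behavior. A short sketch with the `USqlPackage` model above (placeholder values; note the `name`-to-`packageName` rename in `_attribute_map`):

    from azure.mgmt.datalake.analytics.catalog.models import USqlPackage

    pkg = USqlPackage(database_name='master', schema_name='dbo',
                      name='MyPackage', definition='...')
    assert pkg.serialize()['packageName'] == 'MyPackage'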
- :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'procName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlProcedure, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.definition = kwargs.get('definition', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_paged.py deleted file mode 100644 index 61b23707c7e1..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlProcedurePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlProcedure ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlProcedure]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlProcedurePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py deleted file mode 100644 index 98f9a30e6267..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_procedure_py3.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlProcedure(CatalogItem): - """A Data Lake Analytics catalog U-SQL procedure item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this procedure - and database. - :type schema_name: str - :param name: the name of the procedure. 
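Each `*Paged` class is a thin msrest container: `current_page` holds one page of results and `next_link` points at the rest, and iterating the object follows `nextLink` transparently. A sketch of consuming `USqlProcedurePaged`, assuming an authenticated client and placeholder account/database/schema names (`list_procedures` is the catalog operation that returns this container):

    from azure.mgmt.datalake.analytics.catalog import DataLakeAnalyticsCatalogManagementClient

    # credentials: any msrestazure credentials object (placeholder here)
    client = DataLakeAnalyticsCatalogManagementClient(credentials, 'azuredatalakeanalytics.net')
    for proc in client.catalog.list_procedures('myadla', 'master', 'dbo'):
        print(proc.name)  # further pages are fetched lazily as the loop crosses page boundaries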
- :type name: str - :param definition: the defined query of the procedure. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'procName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: - super(USqlProcedure, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py deleted file mode 100644 index 1a292ca673f6..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlSchema(CatalogItem): - """A Data Lake Analytics catalog U-SQL schema item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the schema. - :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'schemaName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlSchema, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.name = kwargs.get('name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_paged.py deleted file mode 100644 index f8f8d2386192..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlSchemaPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlSchema ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlSchema]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlSchemaPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py deleted file mode 100644 index bb7f8d3de46a..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_schema_py3.py +++ /dev/null @@ -1,38 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlSchema(CatalogItem): - """A Data Lake Analytics catalog U-SQL schema item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param name: the name of the schema. - :type name: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'name': {'key': 'schemaName', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, **kwargs) -> None: - super(USqlSchema, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.name = name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py deleted file mode 100644 index 5054e0ecaa44..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlSecret(CatalogItem): - """A Data Lake Analytics catalog U-SQL secret item. - - :param compute_account_name: the name of the Data Lake Analytics account. 
- :type compute_account_name: str
- :param version: the version of the catalog item.
- :type version: str
- :param database_name: the name of the database.
- :type database_name: str
- :param name: the name of the secret.
- :type name: str
- :param creation_time: the creation time of the credential object. This is
- the only information returned about a secret from a GET.
- :type creation_time: datetime
- :param uri: the URI identifier for the secret in the format
- <hostname>:<port>
- :type uri: str
- :param password: the password for the secret to pass in
- :type password: str
- """
-
- _attribute_map = {
- 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
- 'version': {'key': 'version', 'type': 'str'},
- 'database_name': {'key': 'databaseName', 'type': 'str'},
- 'name': {'key': 'secretName', 'type': 'str'},
- 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'password': {'key': 'password', 'type': 'str'},
- }
-
- def __init__(self, **kwargs):
- super(USqlSecret, self).__init__(**kwargs)
- self.database_name = kwargs.get('database_name', None)
- self.name = kwargs.get('name', None)
- self.creation_time = kwargs.get('creation_time', None)
- self.uri = kwargs.get('uri', None)
- self.password = kwargs.get('password', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py
deleted file mode 100644
index a493f7576c8a..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_secret_py3.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from .catalog_item_py3 import CatalogItem
-
-
-class USqlSecret(CatalogItem):
- """A Data Lake Analytics catalog U-SQL secret item.
-
- :param compute_account_name: the name of the Data Lake Analytics account.
- :type compute_account_name: str
- :param version: the version of the catalog item.
- :type version: str
- :param database_name: the name of the database.
- :type database_name: str
- :param name: the name of the secret.
- :type name: str
- :param creation_time: the creation time of the credential object. This is
- the only information returned about a secret from a GET.
- :type creation_time: datetime
- :param uri: the URI identifier for the secret in the format
- <hostname>:<port>
- :type uri: str
- :param password: the password for the secret to pass in
- :type password: str
- """
-
- _attribute_map = {
- 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
- 'version': {'key': 'version', 'type': 'str'},
- 'database_name': {'key': 'databaseName', 'type': 'str'},
- 'name': {'key': 'secretName', 'type': 'str'},
- 'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
- 'uri': {'key': 'uri', 'type': 'str'},
- 'password': {'key': 'password', 'type': 'str'},
- }
-
- def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, creation_time=None, uri: str=None, password: str=None, **kwargs) -> None:
- super(USqlSecret, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
- self.database_name = database_name
- self.name = name
- self.creation_time = creation_time
- self.uri = uri
- self.password = password
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py
deleted file mode 100644
index 4e9bc0830f05..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from .catalog_item import CatalogItem
-
-
-class USqlTable(CatalogItem):
- """A Data Lake Analytics catalog U-SQL table item.
-
- :param compute_account_name: the name of the Data Lake Analytics account.
- :type compute_account_name: str
- :param version: the version of the catalog item.
- :type version: str
- :param database_name: the name of the database.
- :type database_name: str
- :param schema_name: the name of the schema associated with this table and
- database.
- :type schema_name: str
- :param name: the name of the table.
- :type name: str
- :param column_list: the list of columns in this table
- :type column_list:
- list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn]
- :param index_list: the list of indices in this table
- :type index_list:
- list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex]
- :param partition_key_list: the list of partition keys in the table
- :type partition_key_list: list[str]
- :param external_table: the external table associated with the table.
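One behavioral note on the secret model just removed above: as its docstring says, a GET returns only `creation_time`, while `password` and `uri` are write-only inputs. A sketch of the request payload a caller would build (illustrative values only):

    from azure.mgmt.datalake.analytics.catalog.models import USqlSecret

    secret = USqlSecret(database_name='master', name='mySecret',
                        password='not-a-real-password', uri='myhost:1234')
    # 'name' travels as 'secretName'; creation_time is server-populated on reads.
    print(secret.serialize())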
- :type external_table: - ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable - :param distribution_info: the distributions info of the table - :type distribution_info: - ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'tableName', 'type': 'str'}, - 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, - 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, - 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, - 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, - 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, - } - - def __init__(self, **kwargs): - super(USqlTable, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.column_list = kwargs.get('column_list', None) - self.index_list = kwargs.get('index_list', None) - self.partition_key_list = kwargs.get('partition_key_list', None) - self.external_table = kwargs.get('external_table', None) - self.distribution_info = kwargs.get('distribution_info', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py deleted file mode 100644 index 201156537eed..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlTableColumn(Model): - """A Data Lake Analytics catalog U-SQL table column item. - - :param name: the name of the column in the table. - :type name: str - :param type: the object type of the specified column (such as - System.String). 
- :type type: str
- """
-
- _attribute_map = {
- 'name': {'key': 'name', 'type': 'str'},
- 'type': {'key': 'type', 'type': 'str'},
- 'column_name': {'key': 'columnName', 'type': 'str'},
- 'data_type': {'key': 'dataType', 'type': 'str'},
- }
-
- def __init__(self, name=None, type=None, column_name=None, data_type=None):
- super(USqlTableColumn, self).__init__()
- self.name = name if name is not None else column_name
- self.type = type if type is not None else data_type
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py
deleted file mode 100644
index 51ac583ef5f8..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column_py3.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlTableColumn(Model):
- """A Data Lake Analytics catalog U-SQL table column item.
-
- :param name: the name of the column in the table.
- :type name: str
- :param type: the object type of the specified column (such as
- System.String).
- :type type: str
- """
-
- _attribute_map = {
- 'name': {'key': 'name', 'type': 'str'},
- 'type': {'key': 'type', 'type': 'str'},
- 'column_name': {'key': 'columnName', 'type': 'str'},
- 'data_type': {'key': 'dataType', 'type': 'str'},
- }
-
- def __init__(self, *, name: str=None, type: str=None, column_name=None, data_type=None, **kwargs) -> None:
- super(USqlTableColumn, self).__init__(**kwargs)
- self.name = name if name is not None else column_name
- self.type = type if type is not None else data_type
\ No newline at end of file
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py
deleted file mode 100644
index 565a1246737f..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlTableFragment(Model):
- """A Data Lake Analytics catalog U-SQL table fragment item.
-
- :param parent_id: the parent object Id of the table fragment. The parent
- could be a table or table partition.
- :type parent_id: str
- :param fragment_id: the version of the catalog item.
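`USqlTableColumn` is the one hand-tuned model in this batch: its constructor accepts either the current `name`/`type` parameters or the legacy `column_name`/`data_type` ones, so payloads keyed by the older `columnName`/`dataType` wire names still land on the same attributes. Sketch:

    from azure.mgmt.datalake.analytics.catalog.models import USqlTableColumn

    new_style = USqlTableColumn(name='id', type='System.Int32')
    old_style = USqlTableColumn(column_name='id', data_type='System.Int32')
    assert new_style.name == old_style.name == 'id'
    assert new_style.type == old_style.type == 'System.Int32'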
- :type fragment_id: str - :param index_id: the ordinal of the index which contains the table - fragment. - :type index_id: int - :param size: the data size of the table fragment in bytes. - :type size: long - :param row_count: the number of rows in the table fragment. - :type row_count: long - :param create_date: the creation time of the table fragment. - :type create_date: datetime - """ - - _attribute_map = { - 'parent_id': {'key': 'parentId', 'type': 'str'}, - 'fragment_id': {'key': 'fragmentId', 'type': 'str'}, - 'index_id': {'key': 'indexId', 'type': 'int'}, - 'size': {'key': 'size', 'type': 'long'}, - 'row_count': {'key': 'rowCount', 'type': 'long'}, - 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(USqlTableFragment, self).__init__(**kwargs) - self.parent_id = kwargs.get('parent_id', None) - self.fragment_id = kwargs.get('fragment_id', None) - self.index_id = kwargs.get('index_id', None) - self.size = kwargs.get('size', None) - self.row_count = kwargs.get('row_count', None) - self.create_date = kwargs.get('create_date', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py deleted file mode 100644 index f0b3e7c68464..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTableFragmentPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTableFragment ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTableFragment]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTableFragmentPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py deleted file mode 100644 index f211e39638a4..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_fragment_py3.py +++ /dev/null @@ -1,50 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class USqlTableFragment(Model): - """A Data Lake Analytics catalog U-SQL table fragment item. 
- - :param parent_id: the parent object Id of the table fragment. The parent - could be a table or table partition. - :type parent_id: str - :param fragment_id: the version of the catalog item. - :type fragment_id: str - :param index_id: the ordinal of the index which contains the table - fragment. - :type index_id: int - :param size: the data size of the table fragment in bytes. - :type size: long - :param row_count: the number of rows in the table fragment. - :type row_count: long - :param create_date: the creation time of the table fragment. - :type create_date: datetime - """ - - _attribute_map = { - 'parent_id': {'key': 'parentId', 'type': 'str'}, - 'fragment_id': {'key': 'fragmentId', 'type': 'str'}, - 'index_id': {'key': 'indexId', 'type': 'int'}, - 'size': {'key': 'size', 'type': 'long'}, - 'row_count': {'key': 'rowCount', 'type': 'long'}, - 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, - } - - def __init__(self, *, parent_id: str=None, fragment_id: str=None, index_id: int=None, size: int=None, row_count: int=None, create_date=None, **kwargs) -> None: - super(USqlTableFragment, self).__init__(**kwargs) - self.parent_id = parent_id - self.fragment_id = fragment_id - self.index_id = index_id - self.size = size - self.row_count = row_count - self.create_date = create_date diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_paged.py deleted file mode 100644 index b96cd3580b11..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTablePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTable ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTable]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTablePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py deleted file mode 100644 index cc2f0acd03ef..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlTablePartition(CatalogItem): - """A Data Lake Analytics catalog U-SQL table partition item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table - partition and database. - :type schema_name: str - :param name: the name of the table partition. - :type name: str - :param parent_name: the Ddl object of the partition's parent. - :type parent_name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName - :param index_id: the index ID for this partition. - :type index_id: int - :param label: the list of labels associated with this partition. - :type label: list[str] - :param create_date: the creation time of the partition - :type create_date: datetime - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'partitionName', 'type': 'str'}, - 'parent_name': {'key': 'parentName', 'type': 'DdlName'}, - 'index_id': {'key': 'indexId', 'type': 'int'}, - 'label': {'key': 'label', 'type': '[str]'}, - 'create_date': {'key': 'createDate', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(USqlTablePartition, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.parent_name = kwargs.get('parent_name', None) - self.index_id = kwargs.get('index_id', None) - self.label = kwargs.get('label', None) - self.create_date = kwargs.get('create_date', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_paged.py deleted file mode 100644 index 2b0f1db6ac47..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTablePartitionPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTablePartition ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTablePartition]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTablePartitionPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py deleted file mode 100644 index 3fd74aaf5613..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_partition_py3.py +++ /dev/null @@ -1,59 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlTablePartition(CatalogItem): - """A Data Lake Analytics catalog U-SQL table partition item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table - partition and database. - :type schema_name: str - :param name: the name of the table partition. - :type name: str - :param parent_name: the Ddl object of the partition's parent. - :type parent_name: ~azure.mgmt.datalake.analytics.catalog.models.DdlName - :param index_id: the index ID for this partition. - :type index_id: int - :param label: the list of labels associated with this partition. 
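A sketch of how the partition items above typically arrive, reusing the hypothetical client from the earlier sketch (all names are placeholders; `list_table_partitions` is the catalog operation that yields `USqlTablePartitionPaged`):

    for part in client.catalog.list_table_partitions('myadla', 'master', 'dbo', 'mytable'):
        print(part.name, part.index_id, part.create_date)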
- :type label: list[str]
- :param create_date: the creation time of the partition
- :type create_date: datetime
- """
-
- _attribute_map = {
- 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
- 'version': {'key': 'version', 'type': 'str'},
- 'database_name': {'key': 'databaseName', 'type': 'str'},
- 'schema_name': {'key': 'schemaName', 'type': 'str'},
- 'name': {'key': 'partitionName', 'type': 'str'},
- 'parent_name': {'key': 'parentName', 'type': 'DdlName'},
- 'index_id': {'key': 'indexId', 'type': 'int'},
- 'label': {'key': 'label', 'type': '[str]'},
- 'create_date': {'key': 'createDate', 'type': 'iso-8601'},
- }
-
- def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, parent_name=None, index_id: int=None, label=None, create_date=None, **kwargs) -> None:
- super(USqlTablePartition, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs)
- self.database_name = database_name
- self.schema_name = schema_name
- self.name = name
- self.parent_name = parent_name
- self.index_id = index_id
- self.label = label
- self.create_date = create_date
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py
deleted file mode 100644
index 9962446d2e32..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlTablePreview(Model):
- """A Data Lake Analytics catalog table or partition preview rows item.
-
- :param total_row_count: the total number of rows in the table or
- partition.
- :type total_row_count: long
- :param total_column_count: the total number of columns in the table or
- partition.
- :type total_column_count: long
- :param rows: the rows of the table or partition preview, where each row is
- an array of string representations of the row's values. Note: Byte arrays
- will appear as base-64 encoded values, SqlMap and SqlArray objects will
- appear as escaped JSON objects, and DateTime objects will appear as ISO
- formatted UTC date-times.
- :type rows: list[list[str]]
- :param truncated: true if the amount of data in the response is less than
- expected due to the preview operation's size limitations. This can occur
- if the requested rows or row counts are too large.
- :type truncated: bool
- :param schema: the schema of the table or partition.
- :type schema:
- list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn]
- """
-
- _attribute_map = {
- 'total_row_count': {'key': 'totalRowCount', 'type': 'long'},
- 'total_column_count': {'key': 'totalColumnCount', 'type': 'long'},
- 'rows': {'key': 'rows', 'type': '[[str]]'},
- 'truncated': {'key': 'truncated', 'type': 'bool'},
- 'schema': {'key': 'schema', 'type': '[USqlTableColumn]'},
- }
-
- def __init__(self, **kwargs):
- super(USqlTablePreview, self).__init__(**kwargs)
- self.total_row_count = kwargs.get('total_row_count', None)
- self.total_column_count = kwargs.get('total_column_count', None)
- self.rows = kwargs.get('rows', None)
- self.truncated = kwargs.get('truncated', None)
- self.schema = kwargs.get('schema', None)
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py
deleted file mode 100644
index 972044d66119..000000000000
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_preview_py3.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# coding=utf-8
-# --------------------------------------------------------------------------
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License. See License.txt in the project root for
-# license information.
-#
-# Code generated by Microsoft (R) AutoRest Code Generator.
-# Changes may cause incorrect behavior and will be lost if the code is
-# regenerated.
-# --------------------------------------------------------------------------
-
-from msrest.serialization import Model
-
-
-class USqlTablePreview(Model):
- """A Data Lake Analytics catalog table or partition preview rows item.
-
- :param total_row_count: the total number of rows in the table or
- partition.
- :type total_row_count: long
- :param total_column_count: the total number of columns in the table or
- partition.
- :type total_column_count: long
- :param rows: the rows of the table or partition preview, where each row is
- an array of string representations of the row's values. Note: Byte arrays
- will appear as base-64 encoded values, SqlMap and SqlArray objects will
- appear as escaped JSON objects, and DateTime objects will appear as ISO
- formatted UTC date-times.
- :type rows: list[list[str]]
- :param truncated: true if the amount of data in the response is less than
- expected due to the preview operation's size limitations. This can occur
- if the requested rows or row counts are too large.
- :type truncated: bool
- :param schema: the schema of the table or partition.
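Because `rows` is just a list of string lists, pairing it with `schema` recovers a column-keyed view of the preview. A small helper sketch over any `USqlTablePreview` instance (the `preview` variable is hypothetical, e.g. the result of a catalog table-preview call):

    def preview_records(preview):
        """Yield each preview row as a {column_name: value} dict."""
        names = [col.name for col in preview.schema or []]
        for row in preview.rows or []:
            yield dict(zip(names, row))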
- :type schema: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] - """ - - _attribute_map = { - 'total_row_count': {'key': 'totalRowCount', 'type': 'long'}, - 'total_column_count': {'key': 'totalColumnCount', 'type': 'long'}, - 'rows': {'key': 'rows', 'type': '[[str]]'}, - 'truncated': {'key': 'truncated', 'type': 'bool'}, - 'schema': {'key': 'schema', 'type': '[USqlTableColumn]'}, - } - - def __init__(self, *, total_row_count: int=None, total_column_count: int=None, rows=None, truncated: bool=None, schema=None, **kwargs) -> None: - super(USqlTablePreview, self).__init__(**kwargs) - self.total_row_count = total_row_count - self.total_column_count = total_column_count - self.rows = rows - self.truncated = truncated - self.schema = schema diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py deleted file mode 100644 index 9bab0fafbd46..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_py3.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlTable(CatalogItem): - """A Data Lake Analytics catalog U-SQL table item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param name: the name of the table. - :type name: str - :param column_list: the list of columns in this table - :type column_list: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlTableColumn] - :param index_list: the list of indices in this table - :type index_list: - list[~azure.mgmt.datalake.analytics.catalog.models.USqlIndex] - :param partition_key_list: the list of partition keys in the table - :type partition_key_list: list[str] - :param external_table: the external table associated with the table. 
- :type external_table: - ~azure.mgmt.datalake.analytics.catalog.models.ExternalTable - :param distribution_info: the distributions info of the table - :type distribution_info: - ~azure.mgmt.datalake.analytics.catalog.models.USqlDistributionInfo - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'tableName', 'type': 'str'}, - 'column_list': {'key': 'columnList', 'type': '[USqlTableColumn]'}, - 'index_list': {'key': 'indexList', 'type': '[USqlIndex]'}, - 'partition_key_list': {'key': 'partitionKeyList', 'type': '[str]'}, - 'external_table': {'key': 'externalTable', 'type': 'ExternalTable'}, - 'distribution_info': {'key': 'distributionInfo', 'type': 'USqlDistributionInfo'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, column_list=None, index_list=None, partition_key_list=None, external_table=None, distribution_info=None, **kwargs) -> None: - super(USqlTable, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.column_list = column_list - self.index_list = index_list - self.partition_key_list = partition_key_list - self.external_table = external_table - self.distribution_info = distribution_info diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py deleted file mode 100644 index 7d01781764ed..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlTableStatistics(CatalogItem): - """A Data Lake Analytics catalog U-SQL table statistics item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param table_name: the name of the table. - :type table_name: str - :param name: the name of the table statistics. - :type name: str - :param user_stat_name: the name of the user statistics. - :type user_stat_name: str - :param stat_data_path: the path to the statistics data. - :type stat_data_path: str - :param create_time: the creation time of the statistics. - :type create_time: datetime - :param update_time: the last time the statistics were updated. 
- :type update_time: datetime - :param is_user_created: the switch indicating if these statistics are user - created. - :type is_user_created: bool - :param is_auto_created: the switch indicating if these statistics are - automatically created. - :type is_auto_created: bool - :param has_filter: the switch indicating if these statistics have a - filter. - :type has_filter: bool - :param filter_definition: the filter definition for the statistics. - :type filter_definition: str - :param col_names: the list of column names associated with these - statistics. - :type col_names: list[str] - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'table_name': {'key': 'tableName', 'type': 'str'}, - 'name': {'key': 'statisticsName', 'type': 'str'}, - 'user_stat_name': {'key': 'userStatName', 'type': 'str'}, - 'stat_data_path': {'key': 'statDataPath', 'type': 'str'}, - 'create_time': {'key': 'createTime', 'type': 'iso-8601'}, - 'update_time': {'key': 'updateTime', 'type': 'iso-8601'}, - 'is_user_created': {'key': 'isUserCreated', 'type': 'bool'}, - 'is_auto_created': {'key': 'isAutoCreated', 'type': 'bool'}, - 'has_filter': {'key': 'hasFilter', 'type': 'bool'}, - 'filter_definition': {'key': 'filterDefinition', 'type': 'str'}, - 'col_names': {'key': 'colNames', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(USqlTableStatistics, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.table_name = kwargs.get('table_name', None) - self.name = kwargs.get('name', None) - self.user_stat_name = kwargs.get('user_stat_name', None) - self.stat_data_path = kwargs.get('stat_data_path', None) - self.create_time = kwargs.get('create_time', None) - self.update_time = kwargs.get('update_time', None) - self.is_user_created = kwargs.get('is_user_created', None) - self.is_auto_created = kwargs.get('is_auto_created', None) - self.has_filter = kwargs.get('has_filter', None) - self.filter_definition = kwargs.get('filter_definition', None) - self.col_names = kwargs.get('col_names', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_paged.py deleted file mode 100644 index 47fa3fd26bc2..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
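Sketch of a typical consumer of the statistics model above, again with the hypothetical client and placeholder names (`list_table_statistics` returns the `USqlTableStatisticsPaged` container):

    for stats in client.catalog.list_table_statistics('myadla', 'master', 'dbo', 'mytable'):
        if stats.is_user_created:
            print(stats.name, stats.col_names)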
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTableStatisticsPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTableStatistics ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTableStatistics]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTableStatisticsPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py deleted file mode 100644 index d60a6e311279..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_statistics_py3.py +++ /dev/null @@ -1,87 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlTableStatistics(CatalogItem): - """A Data Lake Analytics catalog U-SQL table statistics item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param table_name: the name of the table. - :type table_name: str - :param name: the name of the table statistics. - :type name: str - :param user_stat_name: the name of the user statistics. - :type user_stat_name: str - :param stat_data_path: the path to the statistics data. - :type stat_data_path: str - :param create_time: the creation time of the statistics. - :type create_time: datetime - :param update_time: the last time the statistics were updated. - :type update_time: datetime - :param is_user_created: the switch indicating if these statistics are user - created. - :type is_user_created: bool - :param is_auto_created: the switch indicating if these statistics are - automatically created. - :type is_auto_created: bool - :param has_filter: the switch indicating if these statistics have a - filter. - :type has_filter: bool - :param filter_definition: the filter definition for the statistics. - :type filter_definition: str - :param col_names: the list of column names associated with these - statistics. 
- :type col_names: list[str] - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'table_name': {'key': 'tableName', 'type': 'str'}, - 'name': {'key': 'statisticsName', 'type': 'str'}, - 'user_stat_name': {'key': 'userStatName', 'type': 'str'}, - 'stat_data_path': {'key': 'statDataPath', 'type': 'str'}, - 'create_time': {'key': 'createTime', 'type': 'iso-8601'}, - 'update_time': {'key': 'updateTime', 'type': 'iso-8601'}, - 'is_user_created': {'key': 'isUserCreated', 'type': 'bool'}, - 'is_auto_created': {'key': 'isAutoCreated', 'type': 'bool'}, - 'has_filter': {'key': 'hasFilter', 'type': 'bool'}, - 'filter_definition': {'key': 'filterDefinition', 'type': 'str'}, - 'col_names': {'key': 'colNames', 'type': '[str]'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, table_name: str=None, name: str=None, user_stat_name: str=None, stat_data_path: str=None, create_time=None, update_time=None, is_user_created: bool=None, is_auto_created: bool=None, has_filter: bool=None, filter_definition: str=None, col_names=None, **kwargs) -> None: - super(USqlTableStatistics, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.table_name = table_name - self.name = name - self.user_stat_name = user_stat_name - self.stat_data_path = stat_data_path - self.create_time = create_time - self.update_time = update_time - self.is_user_created = is_user_created - self.is_auto_created = is_auto_created - self.has_filter = has_filter - self.filter_definition = filter_definition - self.col_names = col_names diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py deleted file mode 100644 index 19a1906ca282..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .usql_type import USqlType - - -class USqlTableType(USqlType): - """A Data Lake Analytics catalog U-SQL table type item. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param name: the name of type for this type. - :type name: str - :param type_family: the type family for this type. 
- :type type_family: str - :param c_sharp_name: the C# name for this type. - :type c_sharp_name: str - :param full_csharp_name: the fully qualified C# name for this type. - :type full_csharp_name: str - :param system_type_id: the system type ID for this type. - :type system_type_id: int - :param user_type_id: the user type ID for this type. - :type user_type_id: int - :param schema_id: the schema ID for this type. - :type schema_id: int - :param principal_id: the principal ID for this type. - :type principal_id: int - :param is_nullable: the the switch indicating if this type is nullable. - :type is_nullable: bool - :param is_user_defined: the the switch indicating if this type is user - defined. - :type is_user_defined: bool - :param is_assembly_type: the the switch indicating if this type is an - assembly type. - :type is_assembly_type: bool - :param is_table_type: the the switch indicating if this type is a table - type. - :type is_table_type: bool - :param is_complex_type: the the switch indicating if this type is a - complex type. - :type is_complex_type: bool - :ivar columns: the type field information associated with this table type. - :vartype columns: - list[~azure.mgmt.datalake.analytics.catalog.models.TypeFieldInfo] - """ - - _validation = { - 'columns': {'readonly': True}, - } - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'typeName', 'type': 'str'}, - 'type_family': {'key': 'typeFamily', 'type': 'str'}, - 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, - 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, - 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, - 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, - 'schema_id': {'key': 'schemaId', 'type': 'int'}, - 'principal_id': {'key': 'principalId', 'type': 'int'}, - 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, - 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, - 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, - 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, - } - - def __init__(self, **kwargs): - super(USqlTableType, self).__init__(**kwargs) - self.columns = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_paged.py deleted file mode 100644 index 9cda3f159c89..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTableTypePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTableType ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTableType]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTableTypePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py deleted file mode 100644 index a44f43f366a9..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_type_py3.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .usql_type_py3 import USqlType - - -class USqlTableType(USqlType): - """A Data Lake Analytics catalog U-SQL table type item. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param name: the name of type for this type. - :type name: str - :param type_family: the type family for this type. - :type type_family: str - :param c_sharp_name: the C# name for this type. - :type c_sharp_name: str - :param full_csharp_name: the fully qualified C# name for this type. - :type full_csharp_name: str - :param system_type_id: the system type ID for this type. - :type system_type_id: int - :param user_type_id: the user type ID for this type. - :type user_type_id: int - :param schema_id: the schema ID for this type. - :type schema_id: int - :param principal_id: the principal ID for this type. - :type principal_id: int - :param is_nullable: the the switch indicating if this type is nullable. - :type is_nullable: bool - :param is_user_defined: the the switch indicating if this type is user - defined. - :type is_user_defined: bool - :param is_assembly_type: the the switch indicating if this type is an - assembly type. - :type is_assembly_type: bool - :param is_table_type: the the switch indicating if this type is a table - type. - :type is_table_type: bool - :param is_complex_type: the the switch indicating if this type is a - complex type. - :type is_complex_type: bool - :ivar columns: the type field information associated with this table type. 
- :vartype columns: - list[~azure.mgmt.datalake.analytics.catalog.models.TypeFieldInfo] - """ - - _validation = { - 'columns': {'readonly': True}, - } - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'typeName', 'type': 'str'}, - 'type_family': {'key': 'typeFamily', 'type': 'str'}, - 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, - 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, - 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, - 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, - 'schema_id': {'key': 'schemaId', 'type': 'int'}, - 'principal_id': {'key': 'principalId', 'type': 'int'}, - 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, - 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, - 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, - 'columns': {'key': 'columns', 'type': '[TypeFieldInfo]'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: - super(USqlTableType, self).__init__(compute_account_name=compute_account_name, version=version, database_name=database_name, schema_name=schema_name, name=name, type_family=type_family, c_sharp_name=c_sharp_name, full_csharp_name=full_csharp_name, system_type_id=system_type_id, user_type_id=user_type_id, schema_id=schema_id, principal_id=principal_id, is_nullable=is_nullable, is_user_defined=is_user_defined, is_assembly_type=is_assembly_type, is_table_type=is_table_type, is_complex_type=is_complex_type, **kwargs) - self.columns = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py deleted file mode 100644 index 0c2d0ac4242f..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlTableValuedFunction(CatalogItem): - """A Data Lake Analytics catalog U-SQL table valued function item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. 
- :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this database. - :type schema_name: str - :param name: the name of the table valued function. - :type name: str - :param definition: the definition of the table valued function. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'tvfName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlTableValuedFunction, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.definition = kwargs.get('definition', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_paged.py deleted file mode 100644 index 36065b69b525..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTableValuedFunctionPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlTableValuedFunction ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlTableValuedFunction]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTableValuedFunctionPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py deleted file mode 100644 index 833f96d8d557..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_valued_function_py3.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlTableValuedFunction(CatalogItem): - """A Data Lake Analytics catalog U-SQL table valued function item. 
- - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this database. - :type schema_name: str - :param name: the name of the table valued function. - :type name: str - :param definition: the definition of the table valued function. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'tvfName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: - super(USqlTableValuedFunction, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py deleted file mode 100644 index e377666a65f3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlType(CatalogItem): - """A Data Lake Analytics catalog U-SQL type item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param name: the name of type for this type. - :type name: str - :param type_family: the type family for this type. - :type type_family: str - :param c_sharp_name: the C# name for this type. - :type c_sharp_name: str - :param full_csharp_name: the fully qualified C# name for this type. - :type full_csharp_name: str - :param system_type_id: the system type ID for this type. - :type system_type_id: int - :param user_type_id: the user type ID for this type. - :type user_type_id: int - :param schema_id: the schema ID for this type. - :type schema_id: int - :param principal_id: the principal ID for this type. - :type principal_id: int - :param is_nullable: the the switch indicating if this type is nullable. - :type is_nullable: bool - :param is_user_defined: the the switch indicating if this type is user - defined. 
- :type is_user_defined: bool - :param is_assembly_type: the the switch indicating if this type is an - assembly type. - :type is_assembly_type: bool - :param is_table_type: the the switch indicating if this type is a table - type. - :type is_table_type: bool - :param is_complex_type: the the switch indicating if this type is a - complex type. - :type is_complex_type: bool - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'typeName', 'type': 'str'}, - 'type_family': {'key': 'typeFamily', 'type': 'str'}, - 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, - 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, - 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, - 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, - 'schema_id': {'key': 'schemaId', 'type': 'int'}, - 'principal_id': {'key': 'principalId', 'type': 'int'}, - 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, - 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, - 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, - } - - def __init__(self, **kwargs): - super(USqlType, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.type_family = kwargs.get('type_family', None) - self.c_sharp_name = kwargs.get('c_sharp_name', None) - self.full_csharp_name = kwargs.get('full_csharp_name', None) - self.system_type_id = kwargs.get('system_type_id', None) - self.user_type_id = kwargs.get('user_type_id', None) - self.schema_id = kwargs.get('schema_id', None) - self.principal_id = kwargs.get('principal_id', None) - self.is_nullable = kwargs.get('is_nullable', None) - self.is_user_defined = kwargs.get('is_user_defined', None) - self.is_assembly_type = kwargs.get('is_assembly_type', None) - self.is_table_type = kwargs.get('is_table_type', None) - self.is_complex_type = kwargs.get('is_complex_type', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_paged.py deleted file mode 100644 index f079ab674366..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlTypePaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlType ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlType]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlTypePaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py deleted file mode 100644 index c4c5f0bb306d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_type_py3.py +++ /dev/null @@ -1,95 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlType(CatalogItem): - """A Data Lake Analytics catalog U-SQL type item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this table and - database. - :type schema_name: str - :param name: the name of type for this type. - :type name: str - :param type_family: the type family for this type. - :type type_family: str - :param c_sharp_name: the C# name for this type. - :type c_sharp_name: str - :param full_csharp_name: the fully qualified C# name for this type. - :type full_csharp_name: str - :param system_type_id: the system type ID for this type. - :type system_type_id: int - :param user_type_id: the user type ID for this type. - :type user_type_id: int - :param schema_id: the schema ID for this type. - :type schema_id: int - :param principal_id: the principal ID for this type. - :type principal_id: int - :param is_nullable: the the switch indicating if this type is nullable. - :type is_nullable: bool - :param is_user_defined: the the switch indicating if this type is user - defined. - :type is_user_defined: bool - :param is_assembly_type: the the switch indicating if this type is an - assembly type. - :type is_assembly_type: bool - :param is_table_type: the the switch indicating if this type is a table - type. - :type is_table_type: bool - :param is_complex_type: the the switch indicating if this type is a - complex type. 
- :type is_complex_type: bool - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'typeName', 'type': 'str'}, - 'type_family': {'key': 'typeFamily', 'type': 'str'}, - 'c_sharp_name': {'key': 'cSharpName', 'type': 'str'}, - 'full_csharp_name': {'key': 'fullCSharpName', 'type': 'str'}, - 'system_type_id': {'key': 'systemTypeId', 'type': 'int'}, - 'user_type_id': {'key': 'userTypeId', 'type': 'int'}, - 'schema_id': {'key': 'schemaId', 'type': 'int'}, - 'principal_id': {'key': 'principalId', 'type': 'int'}, - 'is_nullable': {'key': 'isNullable', 'type': 'bool'}, - 'is_user_defined': {'key': 'isUserDefined', 'type': 'bool'}, - 'is_assembly_type': {'key': 'isAssemblyType', 'type': 'bool'}, - 'is_table_type': {'key': 'isTableType', 'type': 'bool'}, - 'is_complex_type': {'key': 'isComplexType', 'type': 'bool'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, type_family: str=None, c_sharp_name: str=None, full_csharp_name: str=None, system_type_id: int=None, user_type_id: int=None, schema_id: int=None, principal_id: int=None, is_nullable: bool=None, is_user_defined: bool=None, is_assembly_type: bool=None, is_table_type: bool=None, is_complex_type: bool=None, **kwargs) -> None: - super(USqlType, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.type_family = type_family - self.c_sharp_name = c_sharp_name - self.full_csharp_name = full_csharp_name - self.system_type_id = system_type_id - self.user_type_id = user_type_id - self.schema_id = schema_id - self.principal_id = principal_id - self.is_nullable = is_nullable - self.is_user_defined = is_user_defined - self.is_assembly_type = is_assembly_type - self.is_table_type = is_table_type - self.is_complex_type = is_complex_type diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py deleted file mode 100644 index a1819d7712cf..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item import CatalogItem - - -class USqlView(CatalogItem): - """A Data Lake Analytics catalog U-SQL view item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this view and - database. 
- :type schema_name: str - :param name: the name of the view. - :type name: str - :param definition: the defined query of the view. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'viewName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(USqlView, self).__init__(**kwargs) - self.database_name = kwargs.get('database_name', None) - self.schema_name = kwargs.get('schema_name', None) - self.name = kwargs.get('name', None) - self.definition = kwargs.get('definition', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_paged.py deleted file mode 100644 index 5536566708a4..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class USqlViewPaged(Paged): - """ - A paging container for iterating over a list of :class:`USqlView ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[USqlView]'} - } - - def __init__(self, *args, **kwargs): - - super(USqlViewPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py deleted file mode 100644 index 675e61057e8d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_py3.py +++ /dev/null @@ -1,47 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .catalog_item_py3 import CatalogItem - - -class USqlView(CatalogItem): - """A Data Lake Analytics catalog U-SQL view item. - - :param compute_account_name: the name of the Data Lake Analytics account. - :type compute_account_name: str - :param version: the version of the catalog item. - :type version: str - :param database_name: the name of the database. - :type database_name: str - :param schema_name: the name of the schema associated with this view and - database. - :type schema_name: str - :param name: the name of the view. 
- :type name: str - :param definition: the defined query of the view. - :type definition: str - """ - - _attribute_map = { - 'compute_account_name': {'key': 'computeAccountName', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'database_name': {'key': 'databaseName', 'type': 'str'}, - 'schema_name': {'key': 'schemaName', 'type': 'str'}, - 'name': {'key': 'viewName', 'type': 'str'}, - 'definition': {'key': 'definition', 'type': 'str'}, - } - - def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, schema_name: str=None, name: str=None, definition: str=None, **kwargs) -> None: - super(USqlView, self).__init__(compute_account_name=compute_account_name, version=version, **kwargs) - self.database_name = database_name - self.schema_name = schema_name - self.name = name - self.definition = definition diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/__init__.py index da1ff05e4292..3a0099aa9372 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/__init__.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/__init__.py @@ -9,7 +9,7 @@ # regenerated. # -------------------------------------------------------------------------- -from .catalog_operations import CatalogOperations +from ._catalog_operations import CatalogOperations __all__ = [ 'CatalogOperations', diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/_catalog_operations.py similarity index 92% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/_catalog_operations.py index a2c93ca2aae1..11c1f07e5e91 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/catalog_operations.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/operations/_catalog_operations.py @@ -19,6 +19,8 @@ class CatalogOperations(object): """CatalogOperations operations. + You should not instantiate this class directly, but create a Client instance that will create it for you and attach it as an attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer.
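The rename to a private ``_catalog_operations`` module, together with the docstring added in the hunk above, means ``CatalogOperations`` is now reached through the client's ``catalog`` attribute rather than constructed by hand. A minimal sketch of that flow follows; the service-principal credential values and the account, database, schema and view names are illustrative placeholders, and the ``get_view`` positional order (account, database, schema, view) is assumed from the generated operations.

.. code:: python

    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.datalake.analytics.catalog import DataLakeAnalyticsCatalogManagementClient

    # Placeholder service-principal values; any msrestazure credentials object works.
    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')

    # The DNS suffix is the value validated by the new _configuration.py module.
    client = DataLakeAnalyticsCatalogManagementClient(
        credentials, 'azuredatalakeanalytics.net')

    # CatalogOperations is created by the client and attached as an attribute;
    # do not instantiate it yourself.
    view = client.catalog.get_view('myaccount', 'mydb', 'dbo', 'myview')
    print(view.definition)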
@@ -95,9 +97,8 @@ def create_secret( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -148,7 +149,7 @@ def get_secret( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -157,8 +158,8 @@ def get_secret( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -166,7 +167,6 @@ def get_secret( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlSecret', response) @@ -234,9 +234,8 @@ def update_secret( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -286,7 +285,6 @@ def delete_secret( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -295,8 +293,8 @@ def delete_secret( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -343,7 +341,6 @@ def delete_all_secrets( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -352,8 +349,8 @@ def delete_all_secrets( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = 
self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -421,9 +418,8 @@ def create_credential( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogCredentialCreateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -473,7 +469,7 @@ def get_credential( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -482,8 +478,8 @@ def get_credential( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -491,7 +487,6 @@ def get_credential( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlCredential', response) @@ -556,9 +551,8 @@ def update_credential( body_content = self._serialize.body(parameters, 'DataLakeAnalyticsCatalogCredentialUpdateParameters') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -637,9 +631,8 @@ def delete_credential( body_content = None # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -690,8 +683,7 @@ def list_credentials( ~azure.mgmt.datalake.analytics.catalog.models.USqlCredentialPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlCredential] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_credentials.metadata['url'] @@ -724,7 +716,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' 
if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -733,9 +725,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -745,12 +741,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlCredentialPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlCredentialPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlCredentialPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_credentials.metadata = {'url': '/catalog/usql/databases/{databaseName}/credentials'} @@ -796,7 +790,7 @@ def get_external_data_source( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -805,8 +799,8 @@ def get_external_data_source( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -814,7 +808,6 @@ def get_external_data_source( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlExternalDataSource', response) @@ -866,8 +859,7 @@ def list_external_data_sources( ~azure.mgmt.datalake.analytics.catalog.models.USqlExternalDataSourcePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlExternalDataSource] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_external_data_sources.metadata['url'] @@ -900,7 +892,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -909,9 +901,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - 
response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -921,12 +917,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlExternalDataSourcePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlExternalDataSourcePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlExternalDataSourcePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_external_data_sources.metadata = {'url': '/catalog/usql/databases/{databaseName}/externaldatasources'} @@ -972,7 +966,7 @@ def get_procedure( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -981,8 +975,8 @@ def get_procedure( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -990,7 +984,6 @@ def get_procedure( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlProcedure', response) @@ -1043,8 +1036,7 @@ def list_procedures( ~azure.mgmt.datalake.analytics.catalog.models.USqlProcedurePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlProcedure] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_procedures.metadata['url'] @@ -1078,7 +1070,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1087,9 +1079,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1099,12 +1095,10 @@ def 
internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlProcedurePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlProcedurePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlProcedurePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_procedures.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/procedures'} @@ -1149,7 +1143,7 @@ def get_table( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1158,8 +1152,8 @@ def get_table( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1167,7 +1161,6 @@ def get_table( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTable', response) @@ -1225,8 +1218,7 @@ def list_table_fragments( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableFragmentPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableFragment] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_fragments.metadata['url'] @@ -1261,7 +1253,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1270,9 +1262,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1282,12 +1278,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableFragmentPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableFragmentPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableFragmentPaged(internal_paging, 
self._deserialize.dependencies, header_dict) return deserialized list_table_fragments.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/tablefragments'} @@ -1339,8 +1333,7 @@ def list_tables( ~azure.mgmt.datalake.analytics.catalog.models.USqlTablePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTable] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_tables.metadata['url'] @@ -1376,7 +1369,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1385,9 +1378,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1397,12 +1394,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTablePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTablePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTablePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_tables.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables'} @@ -1450,8 +1445,7 @@ def list_table_statistics_by_database_and_schema( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatisticsPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatistics] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_statistics_by_database_and_schema.metadata['url'] @@ -1485,7 +1479,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1494,9 +1488,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = 
prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1506,12 +1504,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_statistics_by_database_and_schema.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/statistics'} @@ -1558,7 +1554,7 @@ def get_table_type( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1567,8 +1563,8 @@ def get_table_type( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1576,7 +1572,6 @@ def get_table_type( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTableType', response) @@ -1629,8 +1624,7 @@ def list_table_types( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableTypePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableType] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_types.metadata['url'] @@ -1664,7 +1658,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1673,9 +1667,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1685,12 +1683,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableTypePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: 
header_dict = {} - client_raw_response = models.USqlTableTypePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableTypePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_types.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tabletypes'} @@ -1735,7 +1731,7 @@ def get_package( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1744,8 +1740,8 @@ def get_package( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1753,7 +1749,6 @@ def get_package( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlPackage', response) @@ -1806,8 +1801,7 @@ def list_packages( ~azure.mgmt.datalake.analytics.catalog.models.USqlPackagePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlPackage] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_packages.metadata['url'] @@ -1841,7 +1835,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1850,9 +1844,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1862,12 +1860,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlPackagePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlPackagePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlPackagePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_packages.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/packages'} @@ -1912,7 +1908,7 @@ def get_view( # Construct headers header_parameters = {} - 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -1921,8 +1917,8 @@ def get_view( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -1930,7 +1926,6 @@ def get_view( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlView', response) @@ -1982,8 +1977,7 @@ def list_views( ~azure.mgmt.datalake.analytics.catalog.models.USqlViewPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlView] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_views.metadata['url'] @@ -2017,7 +2011,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2026,9 +2020,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2038,12 +2036,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlViewPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlViewPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlViewPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_views.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/views'} @@ -2094,7 +2090,7 @@ def get_table_statistic( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2103,8 +2099,8 @@ def get_table_statistic( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - 
response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2112,7 +2108,6 @@ def get_table_statistic( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTableStatistics', response) @@ -2168,8 +2163,7 @@ def list_table_statistics( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatisticsPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatistics] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_statistics.metadata['url'] @@ -2204,7 +2198,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2213,9 +2207,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2225,12 +2223,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_statistics.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/statistics'} @@ -2289,7 +2285,7 @@ def preview_table_partition( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2298,8 +2294,8 @@ def preview_table_partition( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2307,7 +2303,6 @@ def preview_table_partition( raise 
exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTablePreview', response) @@ -2364,7 +2359,7 @@ def get_table_partition( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2373,8 +2368,8 @@ def get_table_partition( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2382,7 +2377,6 @@ def get_table_partition( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTablePartition', response) @@ -2443,7 +2437,7 @@ def preview_table( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2452,8 +2446,8 @@ def preview_table( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2461,7 +2455,6 @@ def preview_table( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTablePreview', response) @@ -2517,8 +2510,7 @@ def list_table_partitions( ~azure.mgmt.datalake.analytics.catalog.models.USqlTablePartitionPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTablePartition] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_partitions.metadata['url'] @@ -2553,7 +2545,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2562,9 +2554,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + 
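# internal_paging now only sends the request; prepare_request built the
+ # complete URL, query string, and headers, so the first page and nextLink
+ # continuations share one construction path.
+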
response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2574,12 +2570,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTablePartitionPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTablePartitionPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTablePartitionPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_partitions.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tables/{tableName}/partitions'} @@ -2626,8 +2620,7 @@ def list_types( ~azure.mgmt.datalake.analytics.catalog.models.USqlTypePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlType] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_types.metadata['url'] @@ -2661,7 +2654,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2670,9 +2663,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2682,12 +2679,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTypePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTypePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTypePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_types.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/types'} @@ -2737,7 +2732,7 @@ def get_table_valued_function( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2746,8 +2741,8 @@ def get_table_valued_function( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, 
header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2755,7 +2750,6 @@ def get_table_valued_function( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlTableValuedFunction', response) @@ -2810,8 +2804,7 @@ def list_table_valued_functions( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableValuedFunctionPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableValuedFunction] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_valued_functions.metadata['url'] @@ -2845,7 +2838,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2854,9 +2847,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2866,12 +2863,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_valued_functions.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas/{schemaName}/tablevaluedfunctions'} @@ -2914,7 +2909,7 @@ def get_assembly( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -2923,8 +2918,8 @@ def get_assembly( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -2932,7 +2927,6 @@ def get_assembly( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlAssembly', response) @@ -2983,8 +2977,7 @@ 
def list_assemblies( ~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyClrPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlAssemblyClr] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_assemblies.metadata['url'] @@ -3017,7 +3010,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3026,9 +3019,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3038,12 +3035,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlAssemblyClrPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlAssemblyClrPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlAssemblyClrPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_assemblies.metadata = {'url': '/catalog/usql/databases/{databaseName}/assemblies'} @@ -3085,7 +3080,7 @@ def get_schema( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3094,8 +3089,8 @@ def get_schema( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3103,7 +3098,6 @@ def get_schema( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlSchema', response) @@ -3153,8 +3147,7 @@ def list_schemas( ~azure.mgmt.datalake.analytics.catalog.models.USqlSchemaPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlSchema] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_schemas.metadata['url'] @@ -3187,7 +3180,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - 
header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3196,9 +3189,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3208,12 +3205,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlSchemaPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlSchemaPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlSchemaPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_schemas.metadata = {'url': '/catalog/usql/databases/{databaseName}/schemas'} @@ -3259,8 +3254,7 @@ def list_table_statistics_by_database( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatisticsPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableStatistics] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_statistics_by_database.metadata['url'] @@ -3293,7 +3287,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3302,9 +3296,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3314,12 +3312,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableStatisticsPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized 
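The paging refactor repeated throughout this file is mechanical, and its caller-visible shape is worth spelling out once: request construction moved into prepare_request, internal_paging only sends, and raw responses are threaded through the Paged model's header_dict instead of a separately constructed client_raw_response. A minimal consumption sketch, not part of this change (the account name and service principal values are placeholders; 'azuredatalakeanalytics.net' is the public catalog DNS suffix):

    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.datalake.analytics.catalog import DataLakeAnalyticsCatalogManagementClient

    # Hypothetical service principal; any msrestazure credentials object works.
    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = DataLakeAnalyticsCatalogManagementClient(
        credentials, 'azuredatalakeanalytics.net')

    # Iterating the returned Paged object drives internal_paging; nextLink
    # pages are fetched lazily through the same prepare_request/send path.
    for db in client.catalog.list_databases('<account-name>'):
        print(db.name)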
list_table_statistics_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/statistics'} @@ -3369,8 +3365,7 @@ def list_tables_by_database( ~azure.mgmt.datalake.analytics.catalog.models.USqlTablePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTable] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_tables_by_database.metadata['url'] @@ -3405,7 +3400,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3414,9 +3409,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3426,12 +3425,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTablePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTablePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTablePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_tables_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/tables'} @@ -3477,8 +3474,7 @@ def list_table_valued_functions_by_database( ~azure.mgmt.datalake.analytics.catalog.models.USqlTableValuedFunctionPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlTableValuedFunction] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_table_valued_functions_by_database.metadata['url'] @@ -3511,7 +3507,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3520,9 +3516,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if 
response.status_code not in [200]: exp = CloudError(response) @@ -3532,12 +3532,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlTableValuedFunctionPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_table_valued_functions_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/tablevaluedfunctions'} @@ -3582,8 +3580,7 @@ def list_views_by_database( ~azure.mgmt.datalake.analytics.catalog.models.USqlViewPaged[~azure.mgmt.datalake.analytics.catalog.models.USqlView] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_views_by_database.metadata['url'] @@ -3616,7 +3613,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3625,9 +3622,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3637,12 +3638,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlViewPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlViewPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlViewPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_views_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/views'} @@ -3687,8 +3686,7 @@ def list_acls_by_database( ~azure.mgmt.datalake.analytics.catalog.models.AclPaged[~azure.mgmt.datalake.analytics.catalog.models.Acl] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_acls_by_database.metadata['url'] @@ -3721,7 +3719,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3730,9 +3728,13 @@ def internal_paging(next_link=None, raw=False): 
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3742,12 +3744,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.AclPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.AclPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.AclPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_acls_by_database.metadata = {'url': '/catalog/usql/databases/{databaseName}/acl'} @@ -3790,8 +3790,7 @@ def list_acls( ~azure.mgmt.datalake.analytics.catalog.models.AclPaged[~azure.mgmt.datalake.analytics.catalog.models.Acl] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_acls.metadata['url'] @@ -3823,7 +3822,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3832,9 +3831,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3844,12 +3847,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.AclPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.AclPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.AclPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_acls.metadata = {'url': '/catalog/usql/acl'} @@ -3888,7 +3889,7 @@ def get_database( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3897,8 +3898,8 @@ def get_database( header_parameters['accept-language'] = 
self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -3906,7 +3907,6 @@ def get_database( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('USqlDatabase', response) @@ -3954,8 +3954,7 @@ def list_databases( ~azure.mgmt.datalake.analytics.catalog.models.USqlDatabasePaged[~azure.mgmt.datalake.analytics.catalog.models.USqlDatabase] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_databases.metadata['url'] @@ -3987,7 +3986,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -3996,9 +3995,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -4008,12 +4011,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.USqlDatabasePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.USqlDatabasePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.USqlDatabasePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_databases.metadata = {'url': '/catalog/usql/databases'} @@ -4068,9 +4069,8 @@ def grant_acl( body_content = self._serialize.body(parameters, 'AclCreateOrUpdateParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -4135,9 +4135,8 @@ def grant_acl_to_database( body_content = self._serialize.body(parameters, 'AclCreateOrUpdateParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, 
body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -4205,9 +4204,8 @@ def revoke_acl( body_content = self._serialize.body(parameters, 'AclDeleteParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -4278,9 +4276,8 @@ def revoke_acl_from_database( body_content = self._serialize.body(parameters, 'AclDeleteParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/__init__.py index 8867563082d6..d3e63003f265 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/__init__.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/__init__.py @@ -9,10 +9,11 @@ # regenerated. # -------------------------------------------------------------------------- -from .data_lake_analytics_job_management_client import DataLakeAnalyticsJobManagementClient -from .version import VERSION +from ._configuration import DataLakeAnalyticsJobManagementClientConfiguration +from ._data_lake_analytics_job_management_client import DataLakeAnalyticsJobManagementClient +__all__ = ['DataLakeAnalyticsJobManagementClient', 'DataLakeAnalyticsJobManagementClientConfiguration'] -__all__ = ['DataLakeAnalyticsJobManagementClient'] +from .version import VERSION __version__ = VERSION diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_configuration.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_configuration.py new file mode 100644 index 000000000000..7cdff3572c6f --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_configuration.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- +from msrestazure import AzureConfiguration + +from .version import VERSION + + +class DataLakeAnalyticsJobManagementClientConfiguration(AzureConfiguration): + """Configuration for DataLakeAnalyticsJobManagementClient + Note that all parameters used to create this instance are saved as instance + attributes. + + :param credentials: Credentials needed for the client to connect to Azure. 
+ :type credentials: :mod:`A msrestazure Credentials + object` + :param adla_job_dns_suffix: The DNS suffix used as the base for all Azure + Data Lake Analytics Job service requests. + :type adla_job_dns_suffix: str + """ + + def __init__( + self, credentials, adla_job_dns_suffix): + + if credentials is None: + raise ValueError("Parameter 'credentials' must not be None.") + if adla_job_dns_suffix is None: + raise ValueError("Parameter 'adla_job_dns_suffix' must not be None.") + base_url = 'https://{accountName}.{adlaJobDnsSuffix}' + + super(DataLakeAnalyticsJobManagementClientConfiguration, self).__init__(base_url) + + # Starting Autorest.Python 4.0.64, make connection pool activated by default + self.keep_alive = True + + self.add_user_agent('azure-mgmt-datalake-analytics/{}'.format(VERSION)) + self.add_user_agent('Azure-SDK-For-Python') + + self.credentials = credentials + self.adla_job_dns_suffix = adla_job_dns_suffix diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_data_lake_analytics_job_management_client.py similarity index 61% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_data_lake_analytics_job_management_client.py index c74fcbd0dc46..e0e6d7124c5a 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/data_lake_analytics_job_management_client.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/_data_lake_analytics_job_management_client.py @@ -11,43 +11,12 @@ from msrest.service_client import SDKClient from msrest import Serializer, Deserializer -from msrestazure import AzureConfiguration -from .version import VERSION -from .operations.job_operations import JobOperations -from .operations.pipeline_operations import PipelineOperations -from .operations.recurrence_operations import RecurrenceOperations -from . import models - - -class DataLakeAnalyticsJobManagementClientConfiguration(AzureConfiguration): - """Configuration for DataLakeAnalyticsJobManagementClient - Note that all parameters used to create this instance are saved as instance - attributes. - - :param credentials: Credentials needed for the client to connect to Azure. - :type credentials: :mod:`A msrestazure Credentials - object` - :param adla_job_dns_suffix: The DNS suffix used as the base for all Azure - Data Lake Analytics Job service requests. - :type adla_job_dns_suffix: str - """ - - def __init__( - self, credentials, adla_job_dns_suffix): - if credentials is None: - raise ValueError("Parameter 'credentials' must not be None.") - if adla_job_dns_suffix is None: - raise ValueError("Parameter 'adla_job_dns_suffix' must not be None.") - base_url = 'https://{accountName}.{adlaJobDnsSuffix}' - - super(DataLakeAnalyticsJobManagementClientConfiguration, self).__init__(base_url) - - self.add_user_agent('azure-mgmt-datalake-analytics/{}'.format(VERSION)) - self.add_user_agent('Azure-SDK-For-Python') - - self.credentials = credentials - self.adla_job_dns_suffix = adla_job_dns_suffix +from ._configuration import DataLakeAnalyticsJobManagementClientConfiguration +from .operations import JobOperations +from .operations import PipelineOperations +from .operations import RecurrenceOperations +from . 
import models class DataLakeAnalyticsJobManagementClient(SDKClient): diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py index a45a43d93918..a40cd8ca5f88 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/__init__.py @@ -10,69 +10,69 @@ # -------------------------------------------------------------------------- try: - from .job_inner_error_py3 import JobInnerError - from .job_error_details_py3 import JobErrorDetails - from .job_state_audit_record_py3 import JobStateAuditRecord - from .job_properties_py3 import JobProperties - from .job_information_py3 import JobInformation - from .job_relationship_properties_py3 import JobRelationshipProperties - from .job_information_basic_py3 import JobInformationBasic - from .job_resource_py3 import JobResource - from .job_statistics_vertex_py3 import JobStatisticsVertex - from .resource_usage_statistics_py3 import ResourceUsageStatistics - from .job_statistics_vertex_stage_py3 import JobStatisticsVertexStage - from .job_statistics_py3 import JobStatistics - from .job_data_path_py3 import JobDataPath - from .diagnostics_py3 import Diagnostics - from .usql_job_properties_py3 import USqlJobProperties - from .hive_job_properties_py3 import HiveJobProperties - from .scope_job_resource_py3 import ScopeJobResource - from .scope_job_properties_py3 import ScopeJobProperties - from .job_pipeline_run_information_py3 import JobPipelineRunInformation - from .job_pipeline_information_py3 import JobPipelineInformation - from .job_recurrence_information_py3 import JobRecurrenceInformation - from .create_job_properties_py3 import CreateJobProperties - from .base_job_parameters_py3 import BaseJobParameters - from .create_job_parameters_py3 import CreateJobParameters - from .create_scope_job_parameters_py3 import CreateScopeJobParameters - from .create_usql_job_properties_py3 import CreateUSqlJobProperties - from .create_scope_job_properties_py3 import CreateScopeJobProperties - from .build_job_parameters_py3 import BuildJobParameters - from .update_job_parameters_py3 import UpdateJobParameters + from ._models_py3 import BaseJobParameters + from ._models_py3 import BuildJobParameters + from ._models_py3 import CreateJobParameters + from ._models_py3 import CreateJobProperties + from ._models_py3 import CreateScopeJobParameters + from ._models_py3 import CreateScopeJobProperties + from ._models_py3 import CreateUSqlJobProperties + from ._models_py3 import Diagnostics + from ._models_py3 import HiveJobProperties + from ._models_py3 import JobDataPath + from ._models_py3 import JobErrorDetails + from ._models_py3 import JobInformation + from ._models_py3 import JobInformationBasic + from ._models_py3 import JobInnerError + from ._models_py3 import JobPipelineInformation + from ._models_py3 import JobPipelineRunInformation + from ._models_py3 import JobProperties + from ._models_py3 import JobRecurrenceInformation + from ._models_py3 import JobRelationshipProperties + from ._models_py3 import JobResource + from ._models_py3 import JobStateAuditRecord + from ._models_py3 import JobStatistics + from ._models_py3 import JobStatisticsVertex + from ._models_py3 import JobStatisticsVertexStage + from ._models_py3 import ResourceUsageStatistics + from ._models_py3 import 
ScopeJobProperties + from ._models_py3 import ScopeJobResource + from ._models_py3 import UpdateJobParameters + from ._models_py3 import USqlJobProperties except (SyntaxError, ImportError): - from .job_inner_error import JobInnerError - from .job_error_details import JobErrorDetails - from .job_state_audit_record import JobStateAuditRecord - from .job_properties import JobProperties - from .job_information import JobInformation - from .job_relationship_properties import JobRelationshipProperties - from .job_information_basic import JobInformationBasic - from .job_resource import JobResource - from .job_statistics_vertex import JobStatisticsVertex - from .resource_usage_statistics import ResourceUsageStatistics - from .job_statistics_vertex_stage import JobStatisticsVertexStage - from .job_statistics import JobStatistics - from .job_data_path import JobDataPath - from .diagnostics import Diagnostics - from .usql_job_properties import USqlJobProperties - from .hive_job_properties import HiveJobProperties - from .scope_job_resource import ScopeJobResource - from .scope_job_properties import ScopeJobProperties - from .job_pipeline_run_information import JobPipelineRunInformation - from .job_pipeline_information import JobPipelineInformation - from .job_recurrence_information import JobRecurrenceInformation - from .create_job_properties import CreateJobProperties - from .base_job_parameters import BaseJobParameters - from .create_job_parameters import CreateJobParameters - from .create_scope_job_parameters import CreateScopeJobParameters - from .create_usql_job_properties import CreateUSqlJobProperties - from .create_scope_job_properties import CreateScopeJobProperties - from .build_job_parameters import BuildJobParameters - from .update_job_parameters import UpdateJobParameters -from .job_information_basic_paged import JobInformationBasicPaged -from .job_pipeline_information_paged import JobPipelineInformationPaged -from .job_recurrence_information_paged import JobRecurrenceInformationPaged -from .data_lake_analytics_job_management_client_enums import ( + from ._models import BaseJobParameters + from ._models import BuildJobParameters + from ._models import CreateJobParameters + from ._models import CreateJobProperties + from ._models import CreateScopeJobParameters + from ._models import CreateScopeJobProperties + from ._models import CreateUSqlJobProperties + from ._models import Diagnostics + from ._models import HiveJobProperties + from ._models import JobDataPath + from ._models import JobErrorDetails + from ._models import JobInformation + from ._models import JobInformationBasic + from ._models import JobInnerError + from ._models import JobPipelineInformation + from ._models import JobPipelineRunInformation + from ._models import JobProperties + from ._models import JobRecurrenceInformation + from ._models import JobRelationshipProperties + from ._models import JobResource + from ._models import JobStateAuditRecord + from ._models import JobStatistics + from ._models import JobStatisticsVertex + from ._models import JobStatisticsVertexStage + from ._models import ResourceUsageStatistics + from ._models import ScopeJobProperties + from ._models import ScopeJobResource + from ._models import UpdateJobParameters + from ._models import USqlJobProperties +from ._paged_models import JobInformationBasicPaged +from ._paged_models import JobPipelineInformationPaged +from ._paged_models import JobRecurrenceInformationPaged +from ._data_lake_analytics_job_management_client_enums import ( 
SeverityTypes, JobType, JobState, @@ -82,35 +82,35 @@ ) __all__ = [ - 'JobInnerError', + 'BaseJobParameters', + 'BuildJobParameters', + 'CreateJobParameters', + 'CreateJobProperties', + 'CreateScopeJobParameters', + 'CreateScopeJobProperties', + 'CreateUSqlJobProperties', + 'Diagnostics', + 'HiveJobProperties', + 'JobDataPath', 'JobErrorDetails', - 'JobStateAuditRecord', - 'JobProperties', 'JobInformation', - 'JobRelationshipProperties', 'JobInformationBasic', + 'JobInnerError', + 'JobPipelineInformation', + 'JobPipelineRunInformation', + 'JobProperties', + 'JobRecurrenceInformation', + 'JobRelationshipProperties', 'JobResource', + 'JobStateAuditRecord', + 'JobStatistics', 'JobStatisticsVertex', - 'ResourceUsageStatistics', 'JobStatisticsVertexStage', - 'JobStatistics', - 'JobDataPath', - 'Diagnostics', - 'USqlJobProperties', - 'HiveJobProperties', - 'ScopeJobResource', + 'ResourceUsageStatistics', 'ScopeJobProperties', - 'JobPipelineRunInformation', - 'JobPipelineInformation', - 'JobRecurrenceInformation', - 'CreateJobProperties', - 'BaseJobParameters', - 'CreateJobParameters', - 'CreateScopeJobParameters', - 'CreateUSqlJobProperties', - 'CreateScopeJobProperties', - 'BuildJobParameters', + 'ScopeJobResource', 'UpdateJobParameters', + 'USqlJobProperties', 'JobInformationBasicPaged', 'JobPipelineInformationPaged', 'JobRecurrenceInformationPaged', diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_data_lake_analytics_job_management_client_enums.py similarity index 96% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_data_lake_analytics_job_management_client_enums.py index e0003269cb7e..e81ed60f32da 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/data_lake_analytics_job_management_client_enums.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_data_lake_analytics_job_management_client_enums.py @@ -41,6 +41,8 @@ class JobState(str, Enum): starting = "Starting" paused = "Paused" waiting_for_capacity = "WaitingForCapacity" + yielded = "Yielded" + finalizing = "Finalizing" class JobResult(str, Enum): diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models.py new file mode 100644 index 000000000000..fdf314b067b4 --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models.py @@ -0,0 +1,1818 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BaseJobParameters(Model): + """Data Lake Analytics Job Parameters base class for build and submit. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + } + + def __init__(self, **kwargs): + super(BaseJobParameters, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.properties = kwargs.get('properties', None) + + +class BuildJobParameters(BaseJobParameters): + """The parameters used to build a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: The friendly name of the job to build. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BuildJobParameters, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + + +class CloudError(Model): + """CloudError. + """ + + _attribute_map = { + } + + +class CreateJobParameters(BaseJobParameters): + """The parameters used to submit a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. + :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. 
'*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + } + + def __init__(self, **kwargs): + super(CreateJobParameters, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) + self.degree_of_parallelism_percent = kwargs.get('degree_of_parallelism_percent', None) + self.priority = kwargs.get('priority', None) + self.log_file_patterns = kwargs.get('log_file_patterns', None) + self.related = kwargs.get('related', None) + + +class CreateJobProperties(Model): + """The common Data Lake Analytics job properties for job submission. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} + } + + def __init__(self, **kwargs): + super(CreateJobProperties, self).__init__(**kwargs) + self.runtime_version = kwargs.get('runtime_version', None) + self.script = kwargs.get('script', None) + self.type = None + + +class CreateScopeJobParameters(CreateJobParameters): + """The parameters used to submit a new Data Lake Analytics Scope job. (Only + for use internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. 
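Taken together, the fields above translate into a submission payload like the sketch below; the JobType member name is an assumption (its wire value is 'USql'), the rest mirrors the kwargs consumed in __init__:

from azure.mgmt.datalake.analytics.job.models import (
    CreateJobParameters,
    CreateUSqlJobProperties,
    JobType,
)

# degree_of_parallelism and degree_of_parallelism_percent are mutually
# exclusive; leaving both out means degreeOfParallelism defaults to 1.
parameters = CreateJobParameters(
    type=JobType.usql,                # member name assumed; value 'USql'
    name='hourly-aggregation',        # required friendly name
    degree_of_parallelism=2,
    priority=1000,                    # lower numbers win; must be > 0
    properties=CreateUSqlJobProperties(
        script='@rows = SELECT * FROM @searchlog;',  # capped at 3 MB
    ),
)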
If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. + :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. + :type tags: dict[str, str] + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(CreateScopeJobParameters, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + + +class CreateScopeJobProperties(CreateJobProperties): + """Scope job properties used when submitting Scope jobs. (Only for use + internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param resources: The list of resources that are required by the job. + :type resources: + list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] + :param notifier: The list of email addresses, separated by semi-colons, to + notify when the job reaches a terminal state. 
+ :type notifier: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, + 'notifier': {'key': 'notifier', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CreateScopeJobProperties, self).__init__(**kwargs) + self.resources = kwargs.get('resources', None) + self.notifier = kwargs.get('notifier', None) + self.type = 'Scope' + + +class CreateUSqlJobProperties(CreateJobProperties): + """U-SQL job properties used when submitting U-SQL jobs. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param compile_mode: The specific compilation mode for the job used during + execution. If this is not specified during submission, the server will + determine the optimal compilation mode. Possible values include: + 'Semantic', 'Full', 'SingleBox' + :type compile_mode: str or + ~azure.mgmt.datalake.analytics.job.models.CompileMode + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, + } + + def __init__(self, **kwargs): + super(CreateUSqlJobProperties, self).__init__(**kwargs) + self.compile_mode = kwargs.get('compile_mode', None) + self.type = 'USql' + + +class Diagnostics(Model): + """Error diagnostic information for failed jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar message: The error message. + :vartype message: str + :ivar severity: The severity of the error. Possible values include: + 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar line_number: The line number the error occurred on. + :vartype line_number: int + :ivar column_number: The column where the error occurred. + :vartype column_number: int + :ivar start: The starting index of the error. + :vartype start: int + :ivar end: The ending index of the error. 
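A hedged sketch of a build-only request (compile the script without running it) using BuildJobParameters and CreateUSqlJobProperties from above; the CompileMode member name and the job_client.job.build call are assumptions, not shown in this hunk:

from azure.mgmt.datalake.analytics.job.models import (
    BuildJobParameters,
    CompileMode,
    CreateUSqlJobProperties,
    JobType,
)

build = BuildJobParameters(
    type=JobType.usql,                       # member name assumed
    name='syntax-check',                     # optional on builds
    properties=CreateUSqlJobProperties(
        script='@rows = SELECT 1 AS one;',
        compile_mode=CompileMode.semantic,   # omit to let the server choose
    ),
)
# job_client.job.build('myadlaaccount', build)   # operation name assumed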
+ :vartype end: int + """ + + _validation = { + 'message': {'readonly': True}, + 'severity': {'readonly': True}, + 'line_number': {'readonly': True}, + 'column_number': {'readonly': True}, + 'start': {'readonly': True}, + 'end': {'readonly': True}, + } + + _attribute_map = { + 'message': {'key': 'message', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'column_number': {'key': 'columnNumber', 'type': 'int'}, + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(Diagnostics, self).__init__(**kwargs) + self.message = None + self.severity = None + self.line_number = None + self.column_number = None + self.start = None + self.end = None + + +class JobProperties(Model): + """The common Data Lake Analytics job properties. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} + } + + def __init__(self, **kwargs): + super(JobProperties, self).__init__(**kwargs) + self.runtime_version = kwargs.get('runtime_version', None) + self.script = kwargs.get('script', None) + self.type = None + + +class HiveJobProperties(JobProperties): + """Hive job properties used when retrieving Hive jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar logs_location: The Hive logs location. + :vartype logs_location: str + :ivar output_location: The location of Hive job output files (both + execution output and results). + :vartype output_location: str + :ivar statement_count: The number of statements that will be run based on + the script. + :vartype statement_count: int + :ivar executed_statement_count: The number of statements that have been + run based on the script. 
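The _subtype_map on JobProperties is what lets the deserializer pick the concrete subclass from the wire value of type. Illustrative only, driving the public msrest Deserializer by hand rather than the generated client plumbing:

from msrest import Deserializer

from azure.mgmt.datalake.analytics.job import models

# Hand the deserializer every exported model class so it can resolve the
# 'type' discriminator against _subtype_map.
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
deserialize = Deserializer(client_models)

payload = {'type': 'Hive', 'script': 'SELECT 1;'}
props = deserialize('JobProperties', payload)
print(type(props).__name__)   # -> HiveJobProperties, chosen via _subtype_map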
+ :vartype executed_statement_count: int + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'logs_location': {'readonly': True}, + 'output_location': {'readonly': True}, + 'statement_count': {'readonly': True}, + 'executed_statement_count': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'logs_location': {'key': 'logsLocation', 'type': 'str'}, + 'output_location': {'key': 'outputLocation', 'type': 'str'}, + 'statement_count': {'key': 'statementCount', 'type': 'int'}, + 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(HiveJobProperties, self).__init__(**kwargs) + self.logs_location = None + self.output_location = None + self.statement_count = None + self.executed_statement_count = None + self.type = 'Hive' + + +class JobDataPath(Model): + """A Data Lake Analytics job data path item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar job_id: The ID of the job this data is for. + :vartype job_id: str + :ivar command: The command that this job data relates to. + :vartype command: str + :ivar paths: The list of paths to all of the job data. + :vartype paths: list[str] + """ + + _validation = { + 'job_id': {'readonly': True}, + 'command': {'readonly': True}, + 'paths': {'readonly': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'command': {'key': 'command', 'type': 'str'}, + 'paths': {'key': 'paths', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(JobDataPath, self).__init__(**kwargs) + self.job_id = None + self.command = None + self.paths = None + + +class JobErrorDetails(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. + :vartype details: str + :ivar line_number: The specific line number in the job where the error + occurred. + :vartype line_number: int + :ivar start_offset: The start offset in the job where the error was found + :vartype start_offset: int + :ivar end_offset: The end offset in the job where the error was found. + :vartype end_offset: int + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar file_path: The path to any supplemental error files, if any. + :vartype file_path: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if + any. + :vartype help_link: str + :ivar internal_diagnostics: The internal diagnostic stack trace if the + user requesting the job error details has sufficient permissions it will + be retrieved, otherwise it will be empty. 
+ :vartype internal_diagnostics: str + :ivar inner_error: The inner error of this specific job error message, if + any. + :vartype inner_error: + ~azure.mgmt.datalake.analytics.job.models.JobInnerError + """ + + _validation = { + 'error_id': {'readonly': True}, + 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, + 'description': {'readonly': True}, + 'details': {'readonly': True}, + 'line_number': {'readonly': True}, + 'start_offset': {'readonly': True}, + 'end_offset': {'readonly': True}, + 'resolution': {'readonly': True}, + 'file_path': {'readonly': True}, + 'help_link': {'readonly': True}, + 'internal_diagnostics': {'readonly': True}, + 'inner_error': {'readonly': True}, + } + + _attribute_map = { + 'error_id': {'key': 'errorId', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'help_link': {'key': 'helpLink', 'type': 'str'}, + 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, + 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, + } + + def __init__(self, **kwargs): + super(JobErrorDetails, self).__init__(**kwargs) + self.error_id = None + self.severity = None + self.source = None + self.message = None + self.description = None + self.details = None + self.line_number = None + self.start_offset = None + self.end_offset = None + self.resolution = None + self.file_path = None + self.help_link = None + self.internal_diagnostics = None + self.inner_error = None + + +class JobInformationBasic(Model): + """The common Data Lake Analytics job information properties. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar job_id: The job's unique identifier (a GUID). + :vartype job_id: str + :param name: Required. The friendly name of the job. + :type name: str + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :ivar submitter: The user or account that submitted the job. + :vartype submitter: str + :param degree_of_parallelism: The degree of parallelism used for this job. + Default value: 1 . + :type degree_of_parallelism: int + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :ivar submit_time: The time the job was submitted to the service. + :vartype submit_time: datetime + :ivar start_time: The start time of the job. + :vartype start_time: datetime + :ivar end_time: The completion time of the job. + :vartype end_time: datetime + :ivar state: The job state. When the job is in the Ended state, refer to + Result and ErrorMessage for details. 
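Because inner_error nests another JobInnerError, error handling usually walks the chain. A tiny hypothetical helper (not part of the SDK):

def iter_error_chain(error):
    """Yield a JobErrorDetails/JobInnerError and each nested inner_error."""
    while error is not None:
        yield error
        error = error.inner_error

# for err in iter_error_chain(failed_job.error_message[0]):
#     print(err.severity, err.message, err.resolution)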
Possible values include: 'Accepted', + 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' + :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState + :ivar result: The result of job execution or the current result of the + running job. Possible values include: 'None', 'Succeeded', 'Cancelled', + 'Failed' + :vartype result: str or + ~azure.mgmt.datalake.analytics.job.models.JobResult + :ivar log_folder: The log folder path to use in the following format: + adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. + :vartype log_folder: str + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. (Only for use internally with Scope job type.) + :type tags: dict[str, str] + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. + :vartype hierarchy_queue_node: str + """ + + _validation = { + 'job_id': {'readonly': True}, + 'name': {'required': True}, + 'type': {'required': True}, + 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, + 'submit_time': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'state': {'readonly': True}, + 'result': {'readonly': True}, + 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobType'}, + 'submitter': {'key': 'submitter', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'result': {'key': 'result', 'type': 'JobResult'}, + 'log_folder': {'key': 'logFolder', 'type': 'str'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobInformationBasic, self).__init__(**kwargs) + self.job_id = None + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.submitter = None + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) + self.degree_of_parallelism_percent = None + self.priority = kwargs.get('priority', None) + self.submit_time = None + self.start_time = None + self.end_time = None + self.state = None + self.result = None + self.log_folder = None + self.log_file_patterns = kwargs.get('log_file_patterns', None) + self.related = 
kwargs.get('related', None) + self.tags = kwargs.get('tags', None) + self.hierarchy_queue_node = None + + +class JobInformation(JobInformationBasic): + """The extended Data Lake Analytics job information properties returned when + retrieving a specific job. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar job_id: The job's unique identifier (a GUID). + :vartype job_id: str + :param name: Required. The friendly name of the job. + :type name: str + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :ivar submitter: The user or account that submitted the job. + :vartype submitter: str + :param degree_of_parallelism: The degree of parallelism used for this job. + Default value: 1 . + :type degree_of_parallelism: int + :ivar degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :vartype degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :ivar submit_time: The time the job was submitted to the service. + :vartype submit_time: datetime + :ivar start_time: The start time of the job. + :vartype start_time: datetime + :ivar end_time: The completion time of the job. + :vartype end_time: datetime + :ivar state: The job state. When the job is in the Ended state, refer to + Result and ErrorMessage for details. Possible values include: 'Accepted', + 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', + 'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing' + :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState + :ivar result: The result of job execution or the current result of the + running job. Possible values include: 'None', 'Succeeded', 'Cancelled', + 'Failed' + :vartype result: str or + ~azure.mgmt.datalake.analytics.job.models.JobResult + :ivar log_folder: The log folder path to use in the following format: + adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. + :vartype log_folder: str + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. (Only for use internally with Scope job type.) + :type tags: dict[str, str] + :ivar hierarchy_queue_node: the name of hierarchy queue node this job is + assigned to, Null if job has not been assigned yet or the account doesn't + have hierarchy queue. + :vartype hierarchy_queue_node: str + :ivar error_message: The error message details for the job, if the job + failed. + :vartype error_message: + list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] + :ivar state_audit_records: The job state audit records, indicating when + various operations have been performed on this job. 
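Every field declared with :ivar: above (job_id, submitter, state, result, and so on) is server-populated and, per the __init__ body, reset to None locally, so values set by callers are ignored on requests. A read-side sketch, assuming the client's job operations group exposes a list call as elsewhere in this SDK:

for job in job_client.job.list('myadlaaccount'):     # job_client is a placeholder
    print(job.job_id, job.name, job.state, job.result)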
+ :vartype state_audit_records: + list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] + :param properties: Required. The job specific properties. + :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties + """ + + _validation = { + 'job_id': {'readonly': True}, + 'name': {'required': True}, + 'type': {'required': True}, + 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, + 'submit_time': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'state': {'readonly': True}, + 'result': {'readonly': True}, + 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, + 'error_message': {'readonly': True}, + 'state_audit_records': {'readonly': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobType'}, + 'submitter': {'key': 'submitter', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'result': {'key': 'result', 'type': 'JobResult'}, + 'log_folder': {'key': 'logFolder', 'type': 'str'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, + 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, + 'properties': {'key': 'properties', 'type': 'JobProperties'}, + } + + def __init__(self, **kwargs): + super(JobInformation, self).__init__(**kwargs) + self.error_message = None + self.state_audit_records = None + self.properties = kwargs.get('properties', None) + + +class JobInnerError(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. + :vartype details: str + :ivar diagnostic_code: The diagnostic error code. + :vartype diagnostic_code: int + :ivar component: The component that failed. + :vartype component: str + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if + any. 
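A submit-then-poll sketch tying JobInformation to the states above; job_client is a placeholder, the create/get operation names are assumptions as far as this hunk goes, and is_finished is the helper sketched next to the JobState enum:

import time
import uuid

job_id = str(uuid.uuid4())            # the caller supplies the job GUID
job_client.job.create('myadlaaccount', job_id, parameters)

info = job_client.job.get('myadlaaccount', job_id)
while not is_finished(info.state):
    time.sleep(15)
    info = job_client.job.get('myadlaaccount', job_id)
print(info.result, info.end_time)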
+ :vartype help_link: str + :ivar internal_diagnostics: The internal diagnostic stack trace if the + user requesting the job error details has sufficient permissions it will + be retrieved, otherwise it will be empty. + :vartype internal_diagnostics: str + :ivar inner_error: The inner error of this specific job error message, if + any. + :vartype inner_error: + ~azure.mgmt.datalake.analytics.job.models.JobInnerError + """ + + _validation = { + 'error_id': {'readonly': True}, + 'severity': {'readonly': True}, + 'source': {'readonly': True}, + 'message': {'readonly': True}, + 'description': {'readonly': True}, + 'details': {'readonly': True}, + 'diagnostic_code': {'readonly': True}, + 'component': {'readonly': True}, + 'resolution': {'readonly': True}, + 'help_link': {'readonly': True}, + 'internal_diagnostics': {'readonly': True}, + 'inner_error': {'readonly': True}, + } + + _attribute_map = { + 'error_id': {'key': 'errorId', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'source': {'key': 'source', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, + 'component': {'key': 'component', 'type': 'str'}, + 'resolution': {'key': 'resolution', 'type': 'str'}, + 'help_link': {'key': 'helpLink', 'type': 'str'}, + 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, + 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, + } + + def __init__(self, **kwargs): + super(JobInnerError, self).__init__(**kwargs) + self.error_id = None + self.severity = None + self.source = None + self.message = None + self.description = None + self.details = None + self.diagnostic_code = None + self.component = None + self.resolution = None + self.help_link = None + self.internal_diagnostics = None + self.inner_error = None + + +class JobPipelineInformation(Model): + """Job Pipeline Information, showing the relationship of jobs and recurrences + of those jobs in a pipeline. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar pipeline_id: The job relationship pipeline identifier (a GUID). + :vartype pipeline_id: str + :ivar pipeline_name: The friendly name of the job relationship pipeline, + which does not need to be unique. + :vartype pipeline_name: str + :ivar pipeline_uri: The pipeline uri, unique, links to the originating + service for this pipeline. + :vartype pipeline_uri: str + :ivar num_jobs_failed: The number of jobs in this pipeline that have + failed. + :vartype num_jobs_failed: int + :ivar num_jobs_canceled: The number of jobs in this pipeline that have + been canceled. + :vartype num_jobs_canceled: int + :ivar num_jobs_succeeded: The number of jobs in this pipeline that have + succeeded. + :vartype num_jobs_succeeded: int + :ivar au_hours_failed: The number of job execution hours that resulted in + failed jobs. + :vartype au_hours_failed: float + :ivar au_hours_canceled: The number of job execution hours that resulted + in canceled jobs. + :vartype au_hours_canceled: float + :ivar au_hours_succeeded: The number of job execution hours that resulted + in successful jobs. + :vartype au_hours_succeeded: float + :ivar last_submit_time: The last time a job in this pipeline was + submitted. + :vartype last_submit_time: datetime + :ivar runs: The list of recurrence identifiers representing each run of + this pipeline. 
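All of the pipeline counters above are read-only aggregates, so derived metrics are simple client-side math. A small hypothetical helper:

def pipeline_success_rate(pipeline):
    """Fraction of finished jobs in this pipeline that succeeded, or None."""
    finished = (pipeline.num_jobs_succeeded + pipeline.num_jobs_failed +
                pipeline.num_jobs_canceled)
    return pipeline.num_jobs_succeeded / float(finished) if finished else None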
+ :vartype runs: + list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation] + :ivar recurrences: The list of recurrence identifiers representing each + run of this pipeline. + :vartype recurrences: list[str] + """ + + _validation = { + 'pipeline_id': {'readonly': True}, + 'pipeline_name': {'readonly': True, 'max_length': 260}, + 'pipeline_uri': {'readonly': True}, + 'num_jobs_failed': {'readonly': True}, + 'num_jobs_canceled': {'readonly': True}, + 'num_jobs_succeeded': {'readonly': True}, + 'au_hours_failed': {'readonly': True}, + 'au_hours_canceled': {'readonly': True}, + 'au_hours_succeeded': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + 'runs': {'readonly': True}, + 'recurrences': {'readonly': True}, + } + + _attribute_map = { + 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, + 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, + 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, + 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, + 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, + 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, + 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, + 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, + 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + 'runs': {'key': 'runs', 'type': '[JobPipelineRunInformation]'}, + 'recurrences': {'key': 'recurrences', 'type': '[str]'}, + } + + def __init__(self, **kwargs): + super(JobPipelineInformation, self).__init__(**kwargs) + self.pipeline_id = None + self.pipeline_name = None + self.pipeline_uri = None + self.num_jobs_failed = None + self.num_jobs_canceled = None + self.num_jobs_succeeded = None + self.au_hours_failed = None + self.au_hours_canceled = None + self.au_hours_succeeded = None + self.last_submit_time = None + self.runs = None + self.recurrences = None + + +class JobPipelineRunInformation(Model): + """Run info for a specific job pipeline. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar run_id: The run identifier of an instance of pipeline executions (a + GUID). + :vartype run_id: str + :ivar last_submit_time: The time this instance was last submitted. + :vartype last_submit_time: datetime + """ + + _validation = { + 'run_id': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + } + + _attribute_map = { + 'run_id': {'key': 'runId', 'type': 'str'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobPipelineRunInformation, self).__init__(**kwargs) + self.run_id = None + self.last_submit_time = None + + +class JobRecurrenceInformation(Model): + """Recurrence job information for a specific recurrence. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar recurrence_id: The recurrence identifier (a GUID), unique per + activity/script, regardless of iterations. This is something to link + different occurrences of the same job together. + :vartype recurrence_id: str + :ivar recurrence_name: The recurrence name, user friendly name for the + correlation between jobs. + :vartype recurrence_name: str + :ivar num_jobs_failed: The number of jobs in this recurrence that have + failed. + :vartype num_jobs_failed: int + :ivar num_jobs_canceled: The number of jobs in this recurrence that have + been canceled. 
+ :vartype num_jobs_canceled: int + :ivar num_jobs_succeeded: The number of jobs in this recurrence that have + succeeded. + :vartype num_jobs_succeeded: int + :ivar au_hours_failed: The number of job execution hours that resulted in + failed jobs. + :vartype au_hours_failed: float + :ivar au_hours_canceled: The number of job execution hours that resulted + in canceled jobs. + :vartype au_hours_canceled: float + :ivar au_hours_succeeded: The number of job execution hours that resulted + in successful jobs. + :vartype au_hours_succeeded: float + :ivar last_submit_time: The last time a job in this recurrence was + submitted. + :vartype last_submit_time: datetime + """ + + _validation = { + 'recurrence_id': {'readonly': True}, + 'recurrence_name': {'readonly': True}, + 'num_jobs_failed': {'readonly': True}, + 'num_jobs_canceled': {'readonly': True}, + 'num_jobs_succeeded': {'readonly': True}, + 'au_hours_failed': {'readonly': True}, + 'au_hours_canceled': {'readonly': True}, + 'au_hours_succeeded': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + } + + _attribute_map = { + 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, + 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, + 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, + 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, + 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, + 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, + 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, + 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(JobRecurrenceInformation, self).__init__(**kwargs) + self.recurrence_id = None + self.recurrence_name = None + self.num_jobs_failed = None + self.num_jobs_canceled = None + self.num_jobs_succeeded = None + self.au_hours_failed = None + self.au_hours_canceled = None + self.au_hours_succeeded = None + self.last_submit_time = None + + +class JobRelationshipProperties(Model): + """Job relationship information properties including pipeline information, + correlation information, etc. + + All required parameters must be populated in order to send to Azure. + + :param pipeline_id: The job relationship pipeline identifier (a GUID). + :type pipeline_id: str + :param pipeline_name: The friendly name of the job relationship pipeline, + which does not need to be unique. + :type pipeline_name: str + :param pipeline_uri: The pipeline uri, unique, links to the originating + service for this pipeline. + :type pipeline_uri: str + :param run_id: The run identifier (a GUID), unique identifier of the + iteration of this pipeline. + :type run_id: str + :param recurrence_id: Required. The recurrence identifier (a GUID), unique + per activity/script, regardless of iterations. This is something to link + different occurrences of the same job together. + :type recurrence_id: str + :param recurrence_name: The recurrence name, user friendly name for the + correlation between jobs. 
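recurrence_id is the only required field here; keeping it stable across submissions is what ties occurrences of the same script together, while run_id varies per iteration. A sketch with illustrative GUIDs:

import uuid

from azure.mgmt.datalake.analytics.job.models import JobRelationshipProperties

RECURRENCE_ID = '0d0d3bc4-0000-0000-0000-000000000000'   # illustrative; stable per script

related = JobRelationshipProperties(
    recurrence_id=RECURRENCE_ID,
    recurrence_name='hourly-aggregation',   # optional, max 260 characters
    run_id=str(uuid.uuid4()),               # unique per pipeline iteration
)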
+ :type recurrence_name: str + """ + + _validation = { + 'pipeline_name': {'max_length': 260}, + 'recurrence_id': {'required': True}, + 'recurrence_name': {'max_length': 260}, + } + + _attribute_map = { + 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, + 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, + 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, + 'run_id': {'key': 'runId', 'type': 'str'}, + 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, + 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobRelationshipProperties, self).__init__(**kwargs) + self.pipeline_id = kwargs.get('pipeline_id', None) + self.pipeline_name = kwargs.get('pipeline_name', None) + self.pipeline_uri = kwargs.get('pipeline_uri', None) + self.run_id = kwargs.get('run_id', None) + self.recurrence_id = kwargs.get('recurrence_id', None) + self.recurrence_name = kwargs.get('recurrence_name', None) + + +class JobResource(Model): + """The Data Lake Analytics job resources. + + :param name: The name of the resource. + :type name: str + :param resource_path: The path to the resource. + :type resource_path: str + :param type: The job resource type. Possible values include: + 'VertexResource', 'JobManagerResource', 'StatisticsResource', + 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', + 'StatisticsResourceInUserFolder' + :type type: str or + ~azure.mgmt.datalake.analytics.job.models.JobResourceType + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'resource_path': {'key': 'resourcePath', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobResourceType'}, + } + + def __init__(self, **kwargs): + super(JobResource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.resource_path = kwargs.get('resource_path', None) + self.type = kwargs.get('type', None) + + +class JobStateAuditRecord(Model): + """The Data Lake Analytics job state audit records for tracking the lifecycle + of a job. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar new_state: The new state the job is in. + :vartype new_state: str + :ivar time_stamp: The time stamp that the state change took place. + :vartype time_stamp: datetime + :ivar requested_by_user: The user who requests the change. + :vartype requested_by_user: str + :ivar details: The details of the audit log. + :vartype details: str + """ + + _validation = { + 'new_state': {'readonly': True}, + 'time_stamp': {'readonly': True}, + 'requested_by_user': {'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'new_state': {'key': 'newState', 'type': 'str'}, + 'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}, + 'requested_by_user': {'key': 'requestedByUser', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobStateAuditRecord, self).__init__(**kwargs) + self.new_state = None + self.time_stamp = None + self.requested_by_user = None + self.details = None + + +class JobStatistics(Model): + """The Data Lake Analytics job execution statistics. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar last_update_time_utc: The last update time for the statistics. + :vartype last_update_time_utc: datetime + :ivar finalizing_time_utc: The job finalizing start time. + :vartype finalizing_time_utc: datetime + :ivar stages: The list of stages for the job. 
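The audit records above reconstruct a job's lifecycle; all four fields are read-only. A sketch of dumping them, where job stands in for a retrieved JobInformation:

for record in (job.state_audit_records or []):
    print(record.time_stamp, record.new_state, record.requested_by_user)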
+ :vartype stages: + list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] + """ + + _validation = { + 'last_update_time_utc': {'readonly': True}, + 'finalizing_time_utc': {'readonly': True}, + 'stages': {'readonly': True}, + } + + _attribute_map = { + 'last_update_time_utc': {'key': 'lastUpdateTimeUtc', 'type': 'iso-8601'}, + 'finalizing_time_utc': {'key': 'finalizingTimeUtc', 'type': 'iso-8601'}, + 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, + } + + def __init__(self, **kwargs): + super(JobStatistics, self).__init__(**kwargs) + self.last_update_time_utc = None + self.finalizing_time_utc = None + self.stages = None + + +class JobStatisticsVertex(Model): + """The detailed information for a vertex. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar name: The name of the vertex. + :vartype name: str + :ivar vertex_id: The id of the vertex. + :vartype vertex_id: str + :ivar execution_time: The amount of execution time of the vertex. + :vartype execution_time: timedelta + :ivar data_read: The amount of data read of the vertex, in bytes. + :vartype data_read: long + :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in + bytes. + :vartype peak_mem_usage: long + """ + + _validation = { + 'name': {'readonly': True}, + 'vertex_id': {'readonly': True}, + 'execution_time': {'readonly': True}, + 'data_read': {'readonly': True}, + 'peak_mem_usage': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'vertex_id': {'key': 'vertexId', 'type': 'str'}, + 'execution_time': {'key': 'executionTime', 'type': 'duration'}, + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, + } + + def __init__(self, **kwargs): + super(JobStatisticsVertex, self).__init__(**kwargs) + self.name = None + self.vertex_id = None + self.execution_time = None + self.data_read = None + self.peak_mem_usage = None + + +class JobStatisticsVertexStage(Model): + """The Data Lake Analytics job statistics vertex stage information. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar data_read: The amount of data read, in bytes. + :vartype data_read: long + :ivar data_read_cross_pod: The amount of data read across multiple pods, + in bytes. + :vartype data_read_cross_pod: long + :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. + :vartype data_read_intra_pod: long + :ivar data_to_read: The amount of data remaining to be read, in bytes. + :vartype data_to_read: long + :ivar data_written: The amount of data written, in bytes. + :vartype data_written: long + :ivar duplicate_discard_count: The number of duplicates that were + discarded. + :vartype duplicate_discard_count: int + :ivar failed_count: The number of failures that occurred in this stage. + :vartype failed_count: int + :ivar max_vertex_data_read: The maximum amount of data read in a single + vertex, in bytes. + :vartype max_vertex_data_read: long + :ivar min_vertex_data_read: The minimum amount of data read in a single + vertex, in bytes. + :vartype min_vertex_data_read: long + :ivar read_failure_count: The number of read failures in this stage. + :vartype read_failure_count: int + :ivar revocation_count: The number of vertices that were revoked during + this stage. + :vartype revocation_count: int + :ivar running_count: The number of currently running vertices in this + stage. 
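A sketch of rolling up the per-stage counters defined below; the get_statistics operation name is an assumption, while the field names come straight from the stage model:

stats = job_client.job.get_statistics('myadlaaccount', job_id)   # placeholders
total_read = sum(stage.data_read or 0 for stage in (stats.stages or []))
total_written = sum(stage.data_written or 0 for stage in (stats.stages or []))
print('read %d bytes, wrote %d bytes' % (total_read, total_written))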
+ :vartype running_count: int
+ :ivar scheduled_count: The number of currently scheduled vertices in this
+ stage.
+ :vartype scheduled_count: int
+ :ivar stage_name: The name of this stage in job execution.
+ :vartype stage_name: str
+ :ivar succeeded_count: The number of vertices that succeeded in this
+ stage.
+ :vartype succeeded_count: int
+ :ivar temp_data_written: The amount of temporary data written, in bytes.
+ :vartype temp_data_written: long
+ :ivar total_count: The total vertex count for this stage.
+ :vartype total_count: int
+ :ivar total_failed_time: The amount of time that failed vertices took up
+ in this stage.
+ :vartype total_failed_time: timedelta
+ :ivar total_progress: The current progress of this stage, as a percentage.
+ :vartype total_progress: int
+ :ivar total_succeeded_time: The amount of time all successful vertices
+ took in this stage.
+ :vartype total_succeeded_time: timedelta
+ :ivar total_peak_mem_usage: The sum of the peak memory usage of all the
+ vertices in the stage, in bytes.
+ :vartype total_peak_mem_usage: long
+ :ivar total_execution_time: The sum of the total execution time of all the
+ vertices in the stage.
+ :vartype total_execution_time: timedelta
+ :param max_data_read_vertex: The vertex with the maximum amount of data
+ read.
+ :type max_data_read_vertex:
+ ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+ :param max_execution_time_vertex: The vertex with the maximum execution
+ time.
+ :type max_execution_time_vertex:
+ ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+ :param max_peak_mem_usage_vertex: The vertex with the maximum peak memory
+ usage.
+ :type max_peak_mem_usage_vertex:
+ ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+ :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core
+ count.
+ :vartype estimated_vertex_cpu_core_count: int
+ :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU
+ core count.
+ :vartype estimated_vertex_peak_cpu_core_count: int
+ :ivar estimated_vertex_mem_size: The estimated vertex memory size, in
+ bytes.
+ :vartype estimated_vertex_mem_size: long
+ :param allocated_container_cpu_core_count: The statistics information for
+ the allocated container CPU core count.
+ :type allocated_container_cpu_core_count:
+ ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+ :param allocated_container_mem_size: The statistics information for the
+ allocated container memory size.
+ :type allocated_container_mem_size:
+ ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+ :param used_vertex_cpu_core_count: The statistics information for the used
+ vertex CPU core count.
+ :type used_vertex_cpu_core_count:
+ ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+ :param used_vertex_peak_mem_size: The statistics information for the used
+ vertex peak memory size.
+ :type used_vertex_peak_mem_size: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + """ + + _validation = { + 'data_read': {'readonly': True}, + 'data_read_cross_pod': {'readonly': True}, + 'data_read_intra_pod': {'readonly': True}, + 'data_to_read': {'readonly': True}, + 'data_written': {'readonly': True}, + 'duplicate_discard_count': {'readonly': True}, + 'failed_count': {'readonly': True}, + 'max_vertex_data_read': {'readonly': True}, + 'min_vertex_data_read': {'readonly': True}, + 'read_failure_count': {'readonly': True}, + 'revocation_count': {'readonly': True}, + 'running_count': {'readonly': True}, + 'scheduled_count': {'readonly': True}, + 'stage_name': {'readonly': True}, + 'succeeded_count': {'readonly': True}, + 'temp_data_written': {'readonly': True}, + 'total_count': {'readonly': True}, + 'total_failed_time': {'readonly': True}, + 'total_progress': {'readonly': True}, + 'total_succeeded_time': {'readonly': True}, + 'total_peak_mem_usage': {'readonly': True}, + 'total_execution_time': {'readonly': True}, + 'estimated_vertex_cpu_core_count': {'readonly': True}, + 'estimated_vertex_peak_cpu_core_count': {'readonly': True}, + 'estimated_vertex_mem_size': {'readonly': True}, + } + + _attribute_map = { + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'}, + 'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'}, + 'data_to_read': {'key': 'dataToRead', 'type': 'long'}, + 'data_written': {'key': 'dataWritten', 'type': 'long'}, + 'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'}, + 'failed_count': {'key': 'failedCount', 'type': 'int'}, + 'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'}, + 'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'}, + 'read_failure_count': {'key': 'readFailureCount', 'type': 'int'}, + 'revocation_count': {'key': 'revocationCount', 'type': 'int'}, + 'running_count': {'key': 'runningCount', 'type': 'int'}, + 'scheduled_count': {'key': 'scheduledCount', 'type': 'int'}, + 'stage_name': {'key': 'stageName', 'type': 'str'}, + 'succeeded_count': {'key': 'succeededCount', 'type': 'int'}, + 'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'}, + 'total_count': {'key': 'totalCount', 'type': 'int'}, + 'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'}, + 'total_progress': {'key': 'totalProgress', 'type': 'int'}, + 'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'}, + 'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'}, + 'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'}, + 'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 'JobStatisticsVertex'}, + 'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'}, + 'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'}, + 'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'}, + 'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'}, + 'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'}, + 'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 
'ResourceUsageStatistics'}, + 'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'}, + } + + def __init__(self, **kwargs): + super(JobStatisticsVertexStage, self).__init__(**kwargs) + self.data_read = None + self.data_read_cross_pod = None + self.data_read_intra_pod = None + self.data_to_read = None + self.data_written = None + self.duplicate_discard_count = None + self.failed_count = None + self.max_vertex_data_read = None + self.min_vertex_data_read = None + self.read_failure_count = None + self.revocation_count = None + self.running_count = None + self.scheduled_count = None + self.stage_name = None + self.succeeded_count = None + self.temp_data_written = None + self.total_count = None + self.total_failed_time = None + self.total_progress = None + self.total_succeeded_time = None + self.total_peak_mem_usage = None + self.total_execution_time = None + self.max_data_read_vertex = kwargs.get('max_data_read_vertex', None) + self.max_execution_time_vertex = kwargs.get('max_execution_time_vertex', None) + self.max_peak_mem_usage_vertex = kwargs.get('max_peak_mem_usage_vertex', None) + self.estimated_vertex_cpu_core_count = None + self.estimated_vertex_peak_cpu_core_count = None + self.estimated_vertex_mem_size = None + self.allocated_container_cpu_core_count = kwargs.get('allocated_container_cpu_core_count', None) + self.allocated_container_mem_size = kwargs.get('allocated_container_mem_size', None) + self.used_vertex_cpu_core_count = kwargs.get('used_vertex_cpu_core_count', None) + self.used_vertex_peak_mem_size = kwargs.get('used_vertex_peak_mem_size', None) + + +class ResourceUsageStatistics(Model): + """The statistics information for resource usage. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar average: The average value. + :vartype average: float + :ivar minimum: The minimum value. + :vartype minimum: long + :ivar maximum: The maximum value. + :vartype maximum: long + """ + + _validation = { + 'average': {'readonly': True}, + 'minimum': {'readonly': True}, + 'maximum': {'readonly': True}, + } + + _attribute_map = { + 'average': {'key': 'average', 'type': 'float'}, + 'minimum': {'key': 'minimum', 'type': 'long'}, + 'maximum': {'key': 'maximum', 'type': 'long'}, + } + + def __init__(self, **kwargs): + super(ResourceUsageStatistics, self).__init__(**kwargs) + self.average = None + self.minimum = None + self.maximum = None + + +class ScopeJobProperties(JobProperties): + """Scope job properties used when submitting and retrieving Scope jobs. (Only + for use internally with Scope job type.). + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar resources: The list of resources that are required by the job. + :vartype resources: + list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] + :ivar user_algebra_path: The algebra file path after the job has + completed. + :vartype user_algebra_path: str + :param notifier: The list of email addresses, separated by semi-colons, to + notify when the job reaches a terminal state. 
+ :type notifier: str + :ivar total_compilation_time: The total time this job spent compiling. + This value should not be set by the user and will be ignored if it is. + :vartype total_compilation_time: timedelta + :ivar total_queued_time: The total time this job spent queued. This value + should not be set by the user and will be ignored if it is. + :vartype total_queued_time: timedelta + :ivar total_running_time: The total time this job spent executing. This + value should not be set by the user and will be ignored if it is. + :vartype total_running_time: timedelta + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager + coordinating job execution. This value should not be set by the user and + will be ignored if it is. + :vartype root_process_node_id: str + :ivar yarn_application_id: The ID used to identify the yarn application + executing the job. This value should not be set by the user and will be + ignored if it is. + :vartype yarn_application_id: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'resources': {'readonly': True}, + 'user_algebra_path': {'readonly': True}, + 'total_compilation_time': {'readonly': True}, + 'total_queued_time': {'readonly': True}, + 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, + 'root_process_node_id': {'readonly': True}, + 'yarn_application_id': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, + 'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'}, + 'notifier': {'key': 'notifier', 'type': 'str'}, + 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, + 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, + 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, + 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, + 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ScopeJobProperties, self).__init__(**kwargs) + self.resources = None + self.user_algebra_path = None + self.notifier = kwargs.get('notifier', None) + self.total_compilation_time = None + self.total_queued_time = None + self.total_running_time = None + self.total_paused_time = None + self.root_process_node_id = None + self.yarn_application_id = None + self.type = 'Scope' + + +class ScopeJobResource(Model): + """The Scope job resources. (Only for use internally with Scope job type.). + + :param name: The name of the resource. + :type name: str + :param path: The path to the resource. + :type path: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'path': {'key': 'path', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ScopeJobResource, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.path = kwargs.get('path', None) + + +class UpdateJobParameters(Model): + """The parameters that can be used to update existing Data Lake Analytics job + information properties. (Only for use internally with Scope job type.). 
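+
+    A minimal construction sketch (illustrative only; the values shown are
+    arbitrary, and submitting the update through a job client is outside the
+    scope of this module):
+
+    .. code-block:: python
+
+        parameters = UpdateJobParameters(
+            degree_of_parallelism=8,      # the new degree of parallelism
+            priority=500,                 # lower numbers run at higher priority
+            tags={'costCenter': '1234'},  # extra metadata key-value pairs
+        )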
+ + :param degree_of_parallelism: The degree of parallelism used for this job. + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. + :type degree_of_parallelism_percent: float + :param priority: The priority value for the current job. Lower numbers + have a higher priority. By default, a job has a priority of 1000. This + must be greater than 0. + :type priority: int + :param tags: The key-value pairs used to add additional metadata to the + job information. + :type tags: dict[str, str] + """ + + _attribute_map = { + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, **kwargs): + super(UpdateJobParameters, self).__init__(**kwargs) + self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None) + self.degree_of_parallelism_percent = kwargs.get('degree_of_parallelism_percent', None) + self.priority = kwargs.get('priority', None) + self.tags = kwargs.get('tags', None) + + +class USqlJobProperties(JobProperties): + """U-SQL job properties used when retrieving U-SQL jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar resources: The list of resources that are required by the job. + :vartype resources: + list[~azure.mgmt.datalake.analytics.job.models.JobResource] + :param statistics: The job specific statistics. + :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics + :param debug_data: The job specific debug data locations. + :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath + :ivar diagnostics: The diagnostics for the job. + :vartype diagnostics: + list[~azure.mgmt.datalake.analytics.job.models.Diagnostics] + :ivar algebra_file_path: The algebra file path after the job has + completed. + :vartype algebra_file_path: str + :ivar total_compilation_time: The total time this job spent compiling. + This value should not be set by the user and will be ignored if it is. + :vartype total_compilation_time: timedelta + :ivar total_queued_time: The total time this job spent queued. This value + should not be set by the user and will be ignored if it is. + :vartype total_queued_time: timedelta + :ivar total_running_time: The total time this job spent executing. This + value should not be set by the user and will be ignored if it is. + :vartype total_running_time: timedelta + :ivar total_paused_time: The total time this job spent paused. This value + should not be set by the user and will be ignored if it is. + :vartype total_paused_time: timedelta + :ivar root_process_node_id: The ID used to identify the job manager + coordinating job execution. This value should not be set by the user and + will be ignored if it is. + :vartype root_process_node_id: str + :ivar yarn_application_id: The ID used to identify the yarn application + executing the job. 
This value should not be set by the user and will be + ignored if it is. + :vartype yarn_application_id: str + :ivar yarn_application_time_stamp: The timestamp (in ticks) for the yarn + application executing the job. This value should not be set by the user + and will be ignored if it is. + :vartype yarn_application_time_stamp: long + :ivar compile_mode: The specific compilation mode for the job used during + execution. If this is not specified during submission, the server will + determine the optimal compilation mode. Possible values include: + 'Semantic', 'Full', 'SingleBox' + :vartype compile_mode: str or + ~azure.mgmt.datalake.analytics.job.models.CompileMode + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'resources': {'readonly': True}, + 'diagnostics': {'readonly': True}, + 'algebra_file_path': {'readonly': True}, + 'total_compilation_time': {'readonly': True}, + 'total_queued_time': {'readonly': True}, + 'total_running_time': {'readonly': True}, + 'total_paused_time': {'readonly': True}, + 'root_process_node_id': {'readonly': True}, + 'yarn_application_id': {'readonly': True}, + 'yarn_application_time_stamp': {'readonly': True}, + 'compile_mode': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[JobResource]'}, + 'statistics': {'key': 'statistics', 'type': 'JobStatistics'}, + 'debug_data': {'key': 'debugData', 'type': 'JobDataPath'}, + 'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'}, + 'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'}, + 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, + 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, + 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, + 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, + 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, + 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, + 'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'}, + 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, + } + + def __init__(self, **kwargs): + super(USqlJobProperties, self).__init__(**kwargs) + self.resources = None + self.statistics = kwargs.get('statistics', None) + self.debug_data = kwargs.get('debug_data', None) + self.diagnostics = None + self.algebra_file_path = None + self.total_compilation_time = None + self.total_queued_time = None + self.total_running_time = None + self.total_paused_time = None + self.root_process_node_id = None + self.yarn_application_id = None + self.yarn_application_time_stamp = None + self.compile_mode = None + self.type = 'USql' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models_py3.py new file mode 100644 index 000000000000..61cd1854efac --- /dev/null +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_models_py3.py @@ -0,0 +1,1818 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BaseJobParameters(Model): + """Data Lake Analytics Job Parameters base class for build and submit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + } + + def __init__(self, *, type, properties, **kwargs) -> None: + super(BaseJobParameters, self).__init__(**kwargs) + self.type = type + self.properties = properties + + +class BuildJobParameters(BaseJobParameters): + """The parameters used to build a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: The friendly name of the job to build. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + def __init__(self, *, type, properties, name: str=None, **kwargs) -> None: + super(BuildJobParameters, self).__init__(type=type, properties=properties, **kwargs) + self.name = name + + +class CloudError(Model): + """CloudError. + """ + + _attribute_map = { + } + + +class CreateJobParameters(BaseJobParameters): + """The parameters used to submit a new Data Lake Analytics job. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. 
If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. + :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + } + + def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, degree_of_parallelism_percent: float=None, priority: int=None, log_file_patterns=None, related=None, **kwargs) -> None: + super(CreateJobParameters, self).__init__(type=type, properties=properties, **kwargs) + self.name = name + self.degree_of_parallelism = degree_of_parallelism + self.degree_of_parallelism_percent = degree_of_parallelism_percent + self.priority = priority + self.log_file_patterns = log_file_patterns + self.related = related + + +class CreateJobProperties(Model): + """The common Data Lake Analytics job properties for job submission. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(CreateJobProperties, self).__init__(**kwargs) + self.runtime_version = runtime_version + self.script = script + self.type = None + + +class CreateScopeJobParameters(CreateJobParameters): + """The parameters used to submit a new Data Lake Analytics Scope job. (Only + for use internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The job type of the current job (Hive, USql, or + Scope (for internal use only)). 
Possible values include: 'USql', 'Hive', + 'Scope' + :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType + :param properties: Required. The job specific properties. + :type properties: + ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties + :param name: Required. The friendly name of the job to submit. + :type name: str + :param degree_of_parallelism: The degree of parallelism to use for this + job. At most one of degreeOfParallelism and degreeOfParallelismPercent + should be specified. If none, a default value of 1 will be used for + degreeOfParallelism. Default value: 1 . + :type degree_of_parallelism: int + :param degree_of_parallelism_percent: the degree of parallelism in + percentage used for this job. At most one of degreeOfParallelism and + degreeOfParallelismPercent should be specified. If none, a default value + of 1 will be used for degreeOfParallelism. + :type degree_of_parallelism_percent: float + :param priority: The priority value to use for the current job. Lower + numbers have a higher priority. By default, a job has a priority of 1000. + This must be greater than 0. + :type priority: int + :param log_file_patterns: The list of log file name patterns to find in + the logFolder. '*' is the only matching character allowed. Example format: + jobExecution*.log or *mylog*.txt + :type log_file_patterns: list[str] + :param related: The recurring job relationship information properties. + :type related: + ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties + :param tags: The key-value pairs used to add additional metadata to the + job information. + :type tags: dict[str, str] + """ + + _validation = { + 'type': {'required': True}, + 'properties': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'JobType'}, + 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, + 'name': {'key': 'name', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + } + + def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, degree_of_parallelism_percent: float=None, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: + super(CreateScopeJobParameters, self).__init__(type=type, properties=properties, name=name, degree_of_parallelism=degree_of_parallelism, degree_of_parallelism_percent=degree_of_parallelism_percent, priority=priority, log_file_patterns=log_file_patterns, related=related, **kwargs) + self.tags = tags + + +class CreateScopeJobProperties(CreateJobProperties): + """Scope job properties used when submitting Scope jobs. (Only for use + internally with Scope job type.). + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param resources: The list of resources that are required by the job. 
+ :type resources: + list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] + :param notifier: The list of email addresses, separated by semi-colons, to + notify when the job reaches a terminal state. + :type notifier: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, + 'notifier': {'key': 'notifier', 'type': 'str'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, resources=None, notifier: str=None, **kwargs) -> None: + super(CreateScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.resources = resources + self.notifier = notifier + self.type = 'Scope' + + +class CreateUSqlJobProperties(CreateJobProperties): + """U-SQL job properties used when submitting U-SQL jobs. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :param compile_mode: The specific compilation mode for the job used during + execution. If this is not specified during submission, the server will + determine the optimal compilation mode. Possible values include: + 'Semantic', 'Full', 'SingleBox' + :type compile_mode: str or + ~azure.mgmt.datalake.analytics.job.models.CompileMode + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, compile_mode=None, **kwargs) -> None: + super(CreateUSqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.compile_mode = compile_mode + self.type = 'USql' + + +class Diagnostics(Model): + """Error diagnostic information for failed jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar message: The error message. + :vartype message: str + :ivar severity: The severity of the error. Possible values include: + 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar line_number: The line number the error occurred on. + :vartype line_number: int + :ivar column_number: The column where the error occurred. + :vartype column_number: int + :ivar start: The starting index of the error. + :vartype start: int + :ivar end: The ending index of the error. 
+ :vartype end: int + """ + + _validation = { + 'message': {'readonly': True}, + 'severity': {'readonly': True}, + 'line_number': {'readonly': True}, + 'column_number': {'readonly': True}, + 'start': {'readonly': True}, + 'end': {'readonly': True}, + } + + _attribute_map = { + 'message': {'key': 'message', 'type': 'str'}, + 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, + 'line_number': {'key': 'lineNumber', 'type': 'int'}, + 'column_number': {'key': 'columnNumber', 'type': 'int'}, + 'start': {'key': 'start', 'type': 'int'}, + 'end': {'key': 'end', 'type': 'int'}, + } + + def __init__(self, **kwargs) -> None: + super(Diagnostics, self).__init__(**kwargs) + self.message = None + self.severity = None + self.line_number = None + self.column_number = None + self.start = None + self.end = None + + +class JobProperties(Model): + """The common Data Lake Analytics job properties. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(JobProperties, self).__init__(**kwargs) + self.runtime_version = runtime_version + self.script = script + self.type = None + + +class HiveJobProperties(JobProperties): + """Hive job properties used when retrieving Hive jobs. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :param runtime_version: The runtime version of the Data Lake Analytics + engine to use for the specific type of job being run. + :type runtime_version: str + :param script: Required. The script to run. Please note that the maximum + script size is 3 MB. + :type script: str + :param type: Required. Constant filled by server. + :type type: str + :ivar logs_location: The Hive logs location. + :vartype logs_location: str + :ivar output_location: The location of Hive job output files (both + execution output and results). + :vartype output_location: str + :ivar statement_count: The number of statements that will be run based on + the script. + :vartype statement_count: int + :ivar executed_statement_count: The number of statements that have been + run based on the script. 
+ :vartype executed_statement_count: int + """ + + _validation = { + 'script': {'required': True}, + 'type': {'required': True}, + 'logs_location': {'readonly': True}, + 'output_location': {'readonly': True}, + 'statement_count': {'readonly': True}, + 'executed_statement_count': {'readonly': True}, + } + + _attribute_map = { + 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, + 'script': {'key': 'script', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'logs_location': {'key': 'logsLocation', 'type': 'str'}, + 'output_location': {'key': 'outputLocation', 'type': 'str'}, + 'statement_count': {'key': 'statementCount', 'type': 'int'}, + 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, + } + + def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: + super(HiveJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) + self.logs_location = None + self.output_location = None + self.statement_count = None + self.executed_statement_count = None + self.type = 'Hive' + + +class JobDataPath(Model): + """A Data Lake Analytics job data path item. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar job_id: The ID of the job this data is for. + :vartype job_id: str + :ivar command: The command that this job data relates to. + :vartype command: str + :ivar paths: The list of paths to all of the job data. + :vartype paths: list[str] + """ + + _validation = { + 'job_id': {'readonly': True}, + 'command': {'readonly': True}, + 'paths': {'readonly': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'command': {'key': 'command', 'type': 'str'}, + 'paths': {'key': 'paths', 'type': '[str]'}, + } + + def __init__(self, **kwargs) -> None: + super(JobDataPath, self).__init__(**kwargs) + self.job_id = None + self.command = None + self.paths = None + + +class JobErrorDetails(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. + :vartype description: str + :ivar details: The details of the error message. + :vartype details: str + :ivar line_number: The specific line number in the job where the error + occurred. + :vartype line_number: int + :ivar start_offset: The start offset in the job where the error was found + :vartype start_offset: int + :ivar end_offset: The end offset in the job where the error was found. + :vartype end_offset: int + :ivar resolution: The recommended resolution for the failure, if any. + :vartype resolution: str + :ivar file_path: The path to any supplemental error files, if any. + :vartype file_path: str + :ivar help_link: The link to MSDN or Azure help for this type of error, if + any. 
+    :vartype help_link: str
+    :ivar internal_diagnostics: The internal diagnostic stack trace. It is
+     retrieved only if the user requesting the job error details has
+     sufficient permissions; otherwise it is empty.
+    :vartype internal_diagnostics: str
+    :ivar inner_error: The inner error of this specific job error message, if
+     any.
+    :vartype inner_error:
+     ~azure.mgmt.datalake.analytics.job.models.JobInnerError
+    """
+
+    _validation = {
+        'error_id': {'readonly': True},
+        'severity': {'readonly': True},
+        'source': {'readonly': True},
+        'message': {'readonly': True},
+        'description': {'readonly': True},
+        'details': {'readonly': True},
+        'line_number': {'readonly': True},
+        'start_offset': {'readonly': True},
+        'end_offset': {'readonly': True},
+        'resolution': {'readonly': True},
+        'file_path': {'readonly': True},
+        'help_link': {'readonly': True},
+        'internal_diagnostics': {'readonly': True},
+        'inner_error': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'error_id': {'key': 'errorId', 'type': 'str'},
+        'severity': {'key': 'severity', 'type': 'SeverityTypes'},
+        'source': {'key': 'source', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+        'details': {'key': 'details', 'type': 'str'},
+        'line_number': {'key': 'lineNumber', 'type': 'int'},
+        'start_offset': {'key': 'startOffset', 'type': 'int'},
+        'end_offset': {'key': 'endOffset', 'type': 'int'},
+        'resolution': {'key': 'resolution', 'type': 'str'},
+        'file_path': {'key': 'filePath', 'type': 'str'},
+        'help_link': {'key': 'helpLink', 'type': 'str'},
+        'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'},
+        'inner_error': {'key': 'innerError', 'type': 'JobInnerError'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(JobErrorDetails, self).__init__(**kwargs)
+        self.error_id = None
+        self.severity = None
+        self.source = None
+        self.message = None
+        self.description = None
+        self.details = None
+        self.line_number = None
+        self.start_offset = None
+        self.end_offset = None
+        self.resolution = None
+        self.file_path = None
+        self.help_link = None
+        self.internal_diagnostics = None
+        self.inner_error = None
+
+
+class JobInformationBasic(Model):
+    """The common Data Lake Analytics job information properties.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar job_id: The job's unique identifier (a GUID).
+    :vartype job_id: str
+    :param name: Required. The friendly name of the job.
+    :type name: str
+    :param type: Required. The job type of the current job (Hive, USql, or
+     Scope (for internal use only)). Possible values include: 'USql', 'Hive',
+     'Scope'
+    :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
+    :ivar submitter: The user or account that submitted the job.
+    :vartype submitter: str
+    :param degree_of_parallelism: The degree of parallelism used for this job.
+     Default value: 1 .
+    :type degree_of_parallelism: int
+    :ivar degree_of_parallelism_percent: The degree of parallelism in
+     percentage used for this job.
+    :vartype degree_of_parallelism_percent: float
+    :param priority: The priority value for the current job. Lower numbers
+     have a higher priority. By default, a job has a priority of 1000. This
+     must be greater than 0.
+    :type priority: int
+    :ivar submit_time: The time the job was submitted to the service.
+    :vartype submit_time: datetime
+    :ivar start_time: The start time of the job.
+    :vartype start_time: datetime
+    :ivar end_time: The completion time of the job.
+    :vartype end_time: datetime
+    :ivar state: The job state. When the job is in the Ended state, refer to
+     Result and ErrorMessage for details. Possible values include: 'Accepted',
+     'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling',
+     'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing'
+    :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState
+    :ivar result: The result of job execution or the current result of the
+     running job. Possible values include: 'None', 'Succeeded', 'Cancelled',
+     'Failed'
+    :vartype result: str or
+     ~azure.mgmt.datalake.analytics.job.models.JobResult
+    :ivar log_folder: The log folder path to use in the following format:
+     adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
+    :vartype log_folder: str
+    :param log_file_patterns: The list of log file name patterns to find in
+     the logFolder. '*' is the only matching character allowed. Example format:
+     jobExecution*.log or *mylog*.txt
+    :type log_file_patterns: list[str]
+    :param related: The recurring job relationship information properties.
+    :type related:
+     ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
+    :param tags: The key-value pairs used to add additional metadata to the
+     job information. (Only for use internally with Scope job type.)
+    :type tags: dict[str, str]
+    :ivar hierarchy_queue_node: The name of the hierarchy queue node this job
+     is assigned to; null if the job has not been assigned yet or the account
+     does not have a hierarchy queue.
+    :vartype hierarchy_queue_node: str
+    """
+
+    _validation = {
+        'job_id': {'readonly': True},
+        'name': {'required': True},
+        'type': {'required': True},
+        'submitter': {'readonly': True},
+        'degree_of_parallelism_percent': {'readonly': True},
+        'submit_time': {'readonly': True},
+        'start_time': {'readonly': True},
+        'end_time': {'readonly': True},
+        'state': {'readonly': True},
+        'result': {'readonly': True},
+        'log_folder': {'readonly': True},
+        'hierarchy_queue_node': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'job_id': {'key': 'jobId', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'JobType'},
+        'submitter': {'key': 'submitter', 'type': 'str'},
+        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
+        'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'},
+        'priority': {'key': 'priority', 'type': 'int'},
+        'submit_time': {'key': 'submitTime', 'type': 'iso-8601'},
+        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
+        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
+        'state': {'key': 'state', 'type': 'JobState'},
+        'result': {'key': 'result', 'type': 'JobResult'},
+        'log_folder': {'key': 'logFolder', 'type': 'str'},
+        'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'},
+        'related': {'key': 'related', 'type': 'JobRelationshipProperties'},
+        'tags': {'key': 'tags', 'type': '{str}'},
+        'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'},
+    }
+
+    def __init__(self, *, name: str, type, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None:
+        super(JobInformationBasic, self).__init__(**kwargs)
+        self.job_id = None
+        self.name = name
+        self.type = type
+        self.submitter = None
+        self.degree_of_parallelism = degree_of_parallelism
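+        # Attributes assigned None in this initializer are read-only (see
+        # _validation); the service populates them on retrieval and ignores
+        # any value sent in a request.
+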
self.degree_of_parallelism_percent = None
+        self.priority = priority
+        self.submit_time = None
+        self.start_time = None
+        self.end_time = None
+        self.state = None
+        self.result = None
+        self.log_folder = None
+        self.log_file_patterns = log_file_patterns
+        self.related = related
+        self.tags = tags
+        self.hierarchy_queue_node = None
+
+
+class JobInformation(JobInformationBasic):
+    """The extended Data Lake Analytics job information properties returned when
+     retrieving a specific job.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar job_id: The job's unique identifier (a GUID).
+    :vartype job_id: str
+    :param name: Required. The friendly name of the job.
+    :type name: str
+    :param type: Required. The job type of the current job (Hive, USql, or
+     Scope (for internal use only)). Possible values include: 'USql', 'Hive',
+     'Scope'
+    :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType
+    :ivar submitter: The user or account that submitted the job.
+    :vartype submitter: str
+    :param degree_of_parallelism: The degree of parallelism used for this job.
+     Default value: 1 .
+    :type degree_of_parallelism: int
+    :ivar degree_of_parallelism_percent: The degree of parallelism in
+     percentage used for this job.
+    :vartype degree_of_parallelism_percent: float
+    :param priority: The priority value for the current job. Lower numbers
+     have a higher priority. By default, a job has a priority of 1000. This
+     must be greater than 0.
+    :type priority: int
+    :ivar submit_time: The time the job was submitted to the service.
+    :vartype submit_time: datetime
+    :ivar start_time: The start time of the job.
+    :vartype start_time: datetime
+    :ivar end_time: The completion time of the job.
+    :vartype end_time: datetime
+    :ivar state: The job state. When the job is in the Ended state, refer to
+     Result and ErrorMessage for details. Possible values include: 'Accepted',
+     'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling',
+     'Starting', 'Paused', 'WaitingForCapacity', 'Yielded', 'Finalizing'
+    :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState
+    :ivar result: The result of job execution or the current result of the
+     running job. Possible values include: 'None', 'Succeeded', 'Cancelled',
+     'Failed'
+    :vartype result: str or
+     ~azure.mgmt.datalake.analytics.job.models.JobResult
+    :ivar log_folder: The log folder path to use in the following format:
+     adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/.
+    :vartype log_folder: str
+    :param log_file_patterns: The list of log file name patterns to find in
+     the logFolder. '*' is the only matching character allowed. Example format:
+     jobExecution*.log or *mylog*.txt
+    :type log_file_patterns: list[str]
+    :param related: The recurring job relationship information properties.
+    :type related:
+     ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties
+    :param tags: The key-value pairs used to add additional metadata to the
+     job information. (Only for use internally with Scope job type.)
+    :type tags: dict[str, str]
+    :ivar hierarchy_queue_node: The name of the hierarchy queue node this job
+     is assigned to; null if the job has not been assigned yet or the account
+     does not have a hierarchy queue.
+    :vartype hierarchy_queue_node: str
+    :ivar error_message: The error message details for the job, if the job
+     failed.
+ :vartype error_message: + list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] + :ivar state_audit_records: The job state audit records, indicating when + various operations have been performed on this job. + :vartype state_audit_records: + list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] + :param properties: Required. The job specific properties. + :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties + """ + + _validation = { + 'job_id': {'readonly': True}, + 'name': {'required': True}, + 'type': {'required': True}, + 'submitter': {'readonly': True}, + 'degree_of_parallelism_percent': {'readonly': True}, + 'submit_time': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'state': {'readonly': True}, + 'result': {'readonly': True}, + 'log_folder': {'readonly': True}, + 'hierarchy_queue_node': {'readonly': True}, + 'error_message': {'readonly': True}, + 'state_audit_records': {'readonly': True}, + 'properties': {'required': True}, + } + + _attribute_map = { + 'job_id': {'key': 'jobId', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'JobType'}, + 'submitter': {'key': 'submitter', 'type': 'str'}, + 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, + 'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'}, + 'priority': {'key': 'priority', 'type': 'int'}, + 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'state': {'key': 'state', 'type': 'JobState'}, + 'result': {'key': 'result', 'type': 'JobResult'}, + 'log_folder': {'key': 'logFolder', 'type': 'str'}, + 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, + 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'hierarchy_queue_node': {'key': 'hierarchyQueueNode', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, + 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, + 'properties': {'key': 'properties', 'type': 'JobProperties'}, + } + + def __init__(self, *, name: str, type, properties, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: + super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags, **kwargs) + self.error_message = None + self.state_audit_records = None + self.properties = properties + + +class JobInnerError(Model): + """The Data Lake Analytics job error details. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar error_id: The specific identifier for the type of error encountered + in the job. + :vartype error_id: str + :ivar severity: The severity level of the failure. Possible values + include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', + 'UserWarning' + :vartype severity: str or + ~azure.mgmt.datalake.analytics.job.models.SeverityTypes + :ivar source: The ultimate source of the failure (usually either SYSTEM or + USER). + :vartype source: str + :ivar message: The user friendly error message for the failure. + :vartype message: str + :ivar description: The error message description. 
+    :vartype description: str
+    :ivar details: The details of the error message.
+    :vartype details: str
+    :ivar diagnostic_code: The diagnostic error code.
+    :vartype diagnostic_code: int
+    :ivar component: The component that failed.
+    :vartype component: str
+    :ivar resolution: The recommended resolution for the failure, if any.
+    :vartype resolution: str
+    :ivar help_link: The link to MSDN or Azure help for this type of error, if
+     any.
+    :vartype help_link: str
+    :ivar internal_diagnostics: The internal diagnostic stack trace. It is
+     retrieved only if the user requesting the job error details has
+     sufficient permissions; otherwise it is empty.
+    :vartype internal_diagnostics: str
+    :ivar inner_error: The inner error of this specific job error message, if
+     any.
+    :vartype inner_error:
+     ~azure.mgmt.datalake.analytics.job.models.JobInnerError
+    """
+
+    _validation = {
+        'error_id': {'readonly': True},
+        'severity': {'readonly': True},
+        'source': {'readonly': True},
+        'message': {'readonly': True},
+        'description': {'readonly': True},
+        'details': {'readonly': True},
+        'diagnostic_code': {'readonly': True},
+        'component': {'readonly': True},
+        'resolution': {'readonly': True},
+        'help_link': {'readonly': True},
+        'internal_diagnostics': {'readonly': True},
+        'inner_error': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'error_id': {'key': 'errorId', 'type': 'str'},
+        'severity': {'key': 'severity', 'type': 'SeverityTypes'},
+        'source': {'key': 'source', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'description': {'key': 'description', 'type': 'str'},
+        'details': {'key': 'details', 'type': 'str'},
+        'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'},
+        'component': {'key': 'component', 'type': 'str'},
+        'resolution': {'key': 'resolution', 'type': 'str'},
+        'help_link': {'key': 'helpLink', 'type': 'str'},
+        'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'},
+        'inner_error': {'key': 'innerError', 'type': 'JobInnerError'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(JobInnerError, self).__init__(**kwargs)
+        self.error_id = None
+        self.severity = None
+        self.source = None
+        self.message = None
+        self.description = None
+        self.details = None
+        self.diagnostic_code = None
+        self.component = None
+        self.resolution = None
+        self.help_link = None
+        self.internal_diagnostics = None
+        self.inner_error = None
+
+
+class JobPipelineInformation(Model):
+    """Job Pipeline Information, showing the relationship of jobs and recurrences
+     of those jobs in a pipeline.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar pipeline_id: The job relationship pipeline identifier (a GUID).
+    :vartype pipeline_id: str
+    :ivar pipeline_name: The friendly name of the job relationship pipeline,
+     which does not need to be unique.
+    :vartype pipeline_name: str
+    :ivar pipeline_uri: The pipeline uri, unique, links to the originating
+     service for this pipeline.
+    :vartype pipeline_uri: str
+    :ivar num_jobs_failed: The number of jobs in this pipeline that have
+     failed.
+    :vartype num_jobs_failed: int
+    :ivar num_jobs_canceled: The number of jobs in this pipeline that have
+     been canceled.
+    :vartype num_jobs_canceled: int
+    :ivar num_jobs_succeeded: The number of jobs in this pipeline that have
+     succeeded.
+    :vartype num_jobs_succeeded: int
+    :ivar au_hours_failed: The number of job execution hours that resulted in
+     failed jobs.
+    :vartype au_hours_failed: float
+    :ivar au_hours_canceled: The number of job execution hours that resulted
+     in canceled jobs.
+    :vartype au_hours_canceled: float
+    :ivar au_hours_succeeded: The number of job execution hours that resulted
+     in successful jobs.
+    :vartype au_hours_succeeded: float
+    :ivar last_submit_time: The last time a job in this pipeline was
+     submitted.
+    :vartype last_submit_time: datetime
+    :ivar runs: The list of run information representing each run of this
+     pipeline.
+    :vartype runs:
+     list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation]
+    :ivar recurrences: The list of recurrence identifiers representing each
+     run of this pipeline.
+    :vartype recurrences: list[str]
+    """
+
+    _validation = {
+        'pipeline_id': {'readonly': True},
+        'pipeline_name': {'readonly': True, 'max_length': 260},
+        'pipeline_uri': {'readonly': True},
+        'num_jobs_failed': {'readonly': True},
+        'num_jobs_canceled': {'readonly': True},
+        'num_jobs_succeeded': {'readonly': True},
+        'au_hours_failed': {'readonly': True},
+        'au_hours_canceled': {'readonly': True},
+        'au_hours_succeeded': {'readonly': True},
+        'last_submit_time': {'readonly': True},
+        'runs': {'readonly': True},
+        'recurrences': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'pipeline_id': {'key': 'pipelineId', 'type': 'str'},
+        'pipeline_name': {'key': 'pipelineName', 'type': 'str'},
+        'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'},
+        'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'},
+        'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'},
+        'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'},
+        'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'},
+        'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'},
+        'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'},
+        'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'},
+        'runs': {'key': 'runs', 'type': '[JobPipelineRunInformation]'},
+        'recurrences': {'key': 'recurrences', 'type': '[str]'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(JobPipelineInformation, self).__init__(**kwargs)
+        self.pipeline_id = None
+        self.pipeline_name = None
+        self.pipeline_uri = None
+        self.num_jobs_failed = None
+        self.num_jobs_canceled = None
+        self.num_jobs_succeeded = None
+        self.au_hours_failed = None
+        self.au_hours_canceled = None
+        self.au_hours_succeeded = None
+        self.last_submit_time = None
+        self.runs = None
+        self.recurrences = None
+
+
+class JobPipelineRunInformation(Model):
+    """Run info for a specific job pipeline.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar run_id: The run identifier of an instance of pipeline executions (a
+     GUID).
+    :vartype run_id: str
+    :ivar last_submit_time: The time this instance was last submitted.
+    :vartype last_submit_time: datetime
+    """
+
+    _validation = {
+        'run_id': {'readonly': True},
+        'last_submit_time': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'run_id': {'key': 'runId', 'type': 'str'},
+        'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(JobPipelineRunInformation, self).__init__(**kwargs)
+        self.run_id = None
+        self.last_submit_time = None
+
+
+class JobRecurrenceInformation(Model):
+    """Recurrence job information for a specific recurrence.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
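+
+    A retrieval sketch (illustrative only; ``job_client`` is assumed to be a
+    ``DataLakeAnalyticsJobManagementClient`` from this package, and the exact
+    ``recurrence.get`` signature is an assumption, not defined in this file):
+
+    .. code-block:: python
+
+        # recurrence_id is the GUID of the recurrence; all attributes of the
+        # returned model are populated by the service.
+        info = job_client.recurrence.get('myaccount', recurrence_id)
+        print(info.recurrence_name, info.num_jobs_succeeded)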
+ + :ivar recurrence_id: The recurrence identifier (a GUID), unique per + activity/script, regardless of iterations. This is something to link + different occurrences of the same job together. + :vartype recurrence_id: str + :ivar recurrence_name: The recurrence name, user friendly name for the + correlation between jobs. + :vartype recurrence_name: str + :ivar num_jobs_failed: The number of jobs in this recurrence that have + failed. + :vartype num_jobs_failed: int + :ivar num_jobs_canceled: The number of jobs in this recurrence that have + been canceled. + :vartype num_jobs_canceled: int + :ivar num_jobs_succeeded: The number of jobs in this recurrence that have + succeeded. + :vartype num_jobs_succeeded: int + :ivar au_hours_failed: The number of job execution hours that resulted in + failed jobs. + :vartype au_hours_failed: float + :ivar au_hours_canceled: The number of job execution hours that resulted + in canceled jobs. + :vartype au_hours_canceled: float + :ivar au_hours_succeeded: The number of job execution hours that resulted + in successful jobs. + :vartype au_hours_succeeded: float + :ivar last_submit_time: The last time a job in this recurrence was + submitted. + :vartype last_submit_time: datetime + """ + + _validation = { + 'recurrence_id': {'readonly': True}, + 'recurrence_name': {'readonly': True}, + 'num_jobs_failed': {'readonly': True}, + 'num_jobs_canceled': {'readonly': True}, + 'num_jobs_succeeded': {'readonly': True}, + 'au_hours_failed': {'readonly': True}, + 'au_hours_canceled': {'readonly': True}, + 'au_hours_succeeded': {'readonly': True}, + 'last_submit_time': {'readonly': True}, + } + + _attribute_map = { + 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, + 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, + 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, + 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, + 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, + 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, + 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, + 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, + 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs) -> None: + super(JobRecurrenceInformation, self).__init__(**kwargs) + self.recurrence_id = None + self.recurrence_name = None + self.num_jobs_failed = None + self.num_jobs_canceled = None + self.num_jobs_succeeded = None + self.au_hours_failed = None + self.au_hours_canceled = None + self.au_hours_succeeded = None + self.last_submit_time = None + + +class JobRelationshipProperties(Model): + """Job relationship information properties including pipeline information, + correlation information, etc. + + All required parameters must be populated in order to send to Azure. + + :param pipeline_id: The job relationship pipeline identifier (a GUID). + :type pipeline_id: str + :param pipeline_name: The friendly name of the job relationship pipeline, + which does not need to be unique. + :type pipeline_name: str + :param pipeline_uri: The pipeline uri, unique, links to the originating + service for this pipeline. + :type pipeline_uri: str + :param run_id: The run identifier (a GUID), unique identifier of the + iteration of this pipeline. + :type run_id: str + :param recurrence_id: Required. The recurrence identifier (a GUID), unique + per activity/script, regardless of iterations. This is something to link + different occurrences of the same job together. 
+
+class JobRelationshipProperties(Model):
+    """Job relationship information properties including pipeline information,
+    correlation information, etc.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param pipeline_id: The job relationship pipeline identifier (a GUID).
+    :type pipeline_id: str
+    :param pipeline_name: The friendly name of the job relationship pipeline,
+     which does not need to be unique.
+    :type pipeline_name: str
+    :param pipeline_uri: The unique pipeline URI, which links to the
+     originating service for this pipeline.
+    :type pipeline_uri: str
+    :param run_id: The run identifier (a GUID), unique identifier of the
+     iteration of this pipeline.
+    :type run_id: str
+    :param recurrence_id: Required. The recurrence identifier (a GUID), unique
+     per activity/script, regardless of iterations. It links different
+     occurrences of the same job together.
+    :type recurrence_id: str
+    :param recurrence_name: The recurrence name, a user-friendly name used to
+     correlate jobs.
+    :type recurrence_name: str
+    """
+
+    _validation = {
+        'pipeline_name': {'max_length': 260},
+        'recurrence_id': {'required': True},
+        'recurrence_name': {'max_length': 260},
+    }
+
+    _attribute_map = {
+        'pipeline_id': {'key': 'pipelineId', 'type': 'str'},
+        'pipeline_name': {'key': 'pipelineName', 'type': 'str'},
+        'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'},
+        'run_id': {'key': 'runId', 'type': 'str'},
+        'recurrence_id': {'key': 'recurrenceId', 'type': 'str'},
+        'recurrence_name': {'key': 'recurrenceName', 'type': 'str'},
+    }
+
+    def __init__(self, *, recurrence_id: str, pipeline_id: str=None, pipeline_name: str=None, pipeline_uri: str=None, run_id: str=None, recurrence_name: str=None, **kwargs) -> None:
+        super(JobRelationshipProperties, self).__init__(**kwargs)
+        self.pipeline_id = pipeline_id
+        self.pipeline_name = pipeline_name
+        self.pipeline_uri = pipeline_uri
+        self.run_id = run_id
+        self.recurrence_id = recurrence_id
+        self.recurrence_name = recurrence_name
+
+
+class JobResource(Model):
+    """The Data Lake Analytics job resources.
+
+    :param name: The name of the resource.
+    :type name: str
+    :param resource_path: The path to the resource.
+    :type resource_path: str
+    :param type: The job resource type. Possible values include:
+     'VertexResource', 'JobManagerResource', 'StatisticsResource',
+     'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder',
+     'StatisticsResourceInUserFolder'
+    :type type: str or
+     ~azure.mgmt.datalake.analytics.job.models.JobResourceType
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'resource_path': {'key': 'resourcePath', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'JobResourceType'},
+    }
+
+    def __init__(self, *, name: str=None, resource_path: str=None, type=None, **kwargs) -> None:
+        super(JobResource, self).__init__(**kwargs)
+        self.name = name
+        self.resource_path = resource_path
+        self.type = type
+
+
+class JobStateAuditRecord(Model):
+    """The Data Lake Analytics job state audit records for tracking the lifecycle
+    of a job.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar new_state: The new state the job is in.
+    :vartype new_state: str
+    :ivar time_stamp: The time stamp at which the state change took place.
+    :vartype time_stamp: datetime
+    :ivar requested_by_user: The user who requested the change.
+    :vartype requested_by_user: str
+    :ivar details: The details of the audit log.
+    :vartype details: str
+    """
+
+    _validation = {
+        'new_state': {'readonly': True},
+        'time_stamp': {'readonly': True},
+        'requested_by_user': {'readonly': True},
+        'details': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'new_state': {'key': 'newState', 'type': 'str'},
+        'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'},
+        'requested_by_user': {'key': 'requestedByUser', 'type': 'str'},
+        'details': {'key': 'details', 'type': 'str'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(JobStateAuditRecord, self).__init__(**kwargs)
+        self.new_state = None
+        self.time_stamp = None
+        self.requested_by_user = None
+        self.details = None
+
+
+class JobStatistics(Model):
+    """The Data Lake Analytics job execution statistics.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar last_update_time_utc: The last update time for the statistics.
+ :vartype last_update_time_utc: datetime + :ivar finalizing_time_utc: The job finalizing start time. + :vartype finalizing_time_utc: datetime + :ivar stages: The list of stages for the job. + :vartype stages: + list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] + """ + + _validation = { + 'last_update_time_utc': {'readonly': True}, + 'finalizing_time_utc': {'readonly': True}, + 'stages': {'readonly': True}, + } + + _attribute_map = { + 'last_update_time_utc': {'key': 'lastUpdateTimeUtc', 'type': 'iso-8601'}, + 'finalizing_time_utc': {'key': 'finalizingTimeUtc', 'type': 'iso-8601'}, + 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, + } + + def __init__(self, **kwargs) -> None: + super(JobStatistics, self).__init__(**kwargs) + self.last_update_time_utc = None + self.finalizing_time_utc = None + self.stages = None + + +class JobStatisticsVertex(Model): + """The detailed information for a vertex. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar name: The name of the vertex. + :vartype name: str + :ivar vertex_id: The id of the vertex. + :vartype vertex_id: str + :ivar execution_time: The amount of execution time of the vertex. + :vartype execution_time: timedelta + :ivar data_read: The amount of data read of the vertex, in bytes. + :vartype data_read: long + :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in + bytes. + :vartype peak_mem_usage: long + """ + + _validation = { + 'name': {'readonly': True}, + 'vertex_id': {'readonly': True}, + 'execution_time': {'readonly': True}, + 'data_read': {'readonly': True}, + 'peak_mem_usage': {'readonly': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'vertex_id': {'key': 'vertexId', 'type': 'str'}, + 'execution_time': {'key': 'executionTime', 'type': 'duration'}, + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, + } + + def __init__(self, **kwargs) -> None: + super(JobStatisticsVertex, self).__init__(**kwargs) + self.name = None + self.vertex_id = None + self.execution_time = None + self.data_read = None + self.peak_mem_usage = None + + +class JobStatisticsVertexStage(Model): + """The Data Lake Analytics job statistics vertex stage information. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar data_read: The amount of data read, in bytes. + :vartype data_read: long + :ivar data_read_cross_pod: The amount of data read across multiple pods, + in bytes. + :vartype data_read_cross_pod: long + :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. + :vartype data_read_intra_pod: long + :ivar data_to_read: The amount of data remaining to be read, in bytes. + :vartype data_to_read: long + :ivar data_written: The amount of data written, in bytes. + :vartype data_written: long + :ivar duplicate_discard_count: The number of duplicates that were + discarded. + :vartype duplicate_discard_count: int + :ivar failed_count: The number of failures that occurred in this stage. + :vartype failed_count: int + :ivar max_vertex_data_read: The maximum amount of data read in a single + vertex, in bytes. + :vartype max_vertex_data_read: long + :ivar min_vertex_data_read: The minimum amount of data read in a single + vertex, in bytes. + :vartype min_vertex_data_read: long + :ivar read_failure_count: The number of read failures in this stage. 
+    :vartype read_failure_count: int
+    :ivar revocation_count: The number of vertices that were revoked during
+     this stage.
+    :vartype revocation_count: int
+    :ivar running_count: The number of currently running vertices in this
+     stage.
+    :vartype running_count: int
+    :ivar scheduled_count: The number of currently scheduled vertices in this
+     stage.
+    :vartype scheduled_count: int
+    :ivar stage_name: The name of this stage in job execution.
+    :vartype stage_name: str
+    :ivar succeeded_count: The number of vertices that succeeded in this
+     stage.
+    :vartype succeeded_count: int
+    :ivar temp_data_written: The amount of temporary data written, in bytes.
+    :vartype temp_data_written: long
+    :ivar total_count: The total vertex count for this stage.
+    :vartype total_count: int
+    :ivar total_failed_time: The amount of time that failed vertices took up
+     in this stage.
+    :vartype total_failed_time: timedelta
+    :ivar total_progress: The current progress of this stage, as a percentage.
+    :vartype total_progress: int
+    :ivar total_succeeded_time: The amount of time all successful vertices
+     took in this stage.
+    :vartype total_succeeded_time: timedelta
+    :ivar total_peak_mem_usage: The sum of the peak memory usage of all the
+     vertices in the stage, in bytes.
+    :vartype total_peak_mem_usage: long
+    :ivar total_execution_time: The sum of the total execution time of all the
+     vertices in the stage.
+    :vartype total_execution_time: timedelta
+    :param max_data_read_vertex: The vertex with the maximum amount of data
+     read.
+    :type max_data_read_vertex:
+     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+    :param max_execution_time_vertex: The vertex with the maximum execution
+     time.
+    :type max_execution_time_vertex:
+     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+    :param max_peak_mem_usage_vertex: The vertex with the maximum peak memory
+     usage.
+    :type max_peak_mem_usage_vertex:
+     ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex
+    :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core
+     count.
+    :vartype estimated_vertex_cpu_core_count: int
+    :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU
+     core count.
+    :vartype estimated_vertex_peak_cpu_core_count: int
+    :ivar estimated_vertex_mem_size: The estimated vertex memory size, in
+     bytes.
+    :vartype estimated_vertex_mem_size: long
+    :param allocated_container_cpu_core_count: The statistics information for
+     the allocated container CPU core count.
+    :type allocated_container_cpu_core_count:
+     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+    :param allocated_container_mem_size: The statistics information for the
+     allocated container memory size.
+    :type allocated_container_mem_size:
+     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+    :param used_vertex_cpu_core_count: The statistics information for the used
+     vertex CPU core count.
+    :type used_vertex_cpu_core_count:
+     ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics
+    :param used_vertex_peak_mem_size: The statistics information for the used
+     vertex peak memory size.
+ :type used_vertex_peak_mem_size: + ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics + """ + + _validation = { + 'data_read': {'readonly': True}, + 'data_read_cross_pod': {'readonly': True}, + 'data_read_intra_pod': {'readonly': True}, + 'data_to_read': {'readonly': True}, + 'data_written': {'readonly': True}, + 'duplicate_discard_count': {'readonly': True}, + 'failed_count': {'readonly': True}, + 'max_vertex_data_read': {'readonly': True}, + 'min_vertex_data_read': {'readonly': True}, + 'read_failure_count': {'readonly': True}, + 'revocation_count': {'readonly': True}, + 'running_count': {'readonly': True}, + 'scheduled_count': {'readonly': True}, + 'stage_name': {'readonly': True}, + 'succeeded_count': {'readonly': True}, + 'temp_data_written': {'readonly': True}, + 'total_count': {'readonly': True}, + 'total_failed_time': {'readonly': True}, + 'total_progress': {'readonly': True}, + 'total_succeeded_time': {'readonly': True}, + 'total_peak_mem_usage': {'readonly': True}, + 'total_execution_time': {'readonly': True}, + 'estimated_vertex_cpu_core_count': {'readonly': True}, + 'estimated_vertex_peak_cpu_core_count': {'readonly': True}, + 'estimated_vertex_mem_size': {'readonly': True}, + } + + _attribute_map = { + 'data_read': {'key': 'dataRead', 'type': 'long'}, + 'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'}, + 'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'}, + 'data_to_read': {'key': 'dataToRead', 'type': 'long'}, + 'data_written': {'key': 'dataWritten', 'type': 'long'}, + 'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'}, + 'failed_count': {'key': 'failedCount', 'type': 'int'}, + 'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'}, + 'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'}, + 'read_failure_count': {'key': 'readFailureCount', 'type': 'int'}, + 'revocation_count': {'key': 'revocationCount', 'type': 'int'}, + 'running_count': {'key': 'runningCount', 'type': 'int'}, + 'scheduled_count': {'key': 'scheduledCount', 'type': 'int'}, + 'stage_name': {'key': 'stageName', 'type': 'str'}, + 'succeeded_count': {'key': 'succeededCount', 'type': 'int'}, + 'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'}, + 'total_count': {'key': 'totalCount', 'type': 'int'}, + 'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'}, + 'total_progress': {'key': 'totalProgress', 'type': 'int'}, + 'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'}, + 'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'}, + 'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'}, + 'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 'JobStatisticsVertex'}, + 'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'}, + 'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'}, + 'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'}, + 'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'}, + 'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'}, + 'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'}, + 'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 
'ResourceUsageStatistics'},
+        'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'},
+    }
+
+    def __init__(self, *, max_data_read_vertex=None, max_execution_time_vertex=None, max_peak_mem_usage_vertex=None, allocated_container_cpu_core_count=None, allocated_container_mem_size=None, used_vertex_cpu_core_count=None, used_vertex_peak_mem_size=None, **kwargs) -> None:
+        super(JobStatisticsVertexStage, self).__init__(**kwargs)
+        self.data_read = None
+        self.data_read_cross_pod = None
+        self.data_read_intra_pod = None
+        self.data_to_read = None
+        self.data_written = None
+        self.duplicate_discard_count = None
+        self.failed_count = None
+        self.max_vertex_data_read = None
+        self.min_vertex_data_read = None
+        self.read_failure_count = None
+        self.revocation_count = None
+        self.running_count = None
+        self.scheduled_count = None
+        self.stage_name = None
+        self.succeeded_count = None
+        self.temp_data_written = None
+        self.total_count = None
+        self.total_failed_time = None
+        self.total_progress = None
+        self.total_succeeded_time = None
+        self.total_peak_mem_usage = None
+        self.total_execution_time = None
+        self.max_data_read_vertex = max_data_read_vertex
+        self.max_execution_time_vertex = max_execution_time_vertex
+        self.max_peak_mem_usage_vertex = max_peak_mem_usage_vertex
+        self.estimated_vertex_cpu_core_count = None
+        self.estimated_vertex_peak_cpu_core_count = None
+        self.estimated_vertex_mem_size = None
+        self.allocated_container_cpu_core_count = allocated_container_cpu_core_count
+        self.allocated_container_mem_size = allocated_container_mem_size
+        self.used_vertex_cpu_core_count = used_vertex_cpu_core_count
+        self.used_vertex_peak_mem_size = used_vertex_peak_mem_size
+
+
+class ResourceUsageStatistics(Model):
+    """The statistics information for resource usage.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    :ivar average: The average value.
+    :vartype average: float
+    :ivar minimum: The minimum value.
+    :vartype minimum: long
+    :ivar maximum: The maximum value.
+    :vartype maximum: long
+    """
+
+    _validation = {
+        'average': {'readonly': True},
+        'minimum': {'readonly': True},
+        'maximum': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'average': {'key': 'average', 'type': 'float'},
+        'minimum': {'key': 'minimum', 'type': 'long'},
+        'maximum': {'key': 'maximum', 'type': 'long'},
+    }
+
+    def __init__(self, **kwargs) -> None:
+        super(ResourceUsageStatistics, self).__init__(**kwargs)
+        self.average = None
+        self.minimum = None
+        self.maximum = None
+
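For illustration only (this example is not part of the diff): a minimal sketch of how the statistics models above fit together when inspecting a finished job. It assumes the same hypothetical authenticated `client` and `account` plus a known `job_id`; `job.get_statistics` is the operation generated alongside these models.

    # Illustrative sketch; `client`, `account`, and `job_id` are assumed to exist.
    stats = client.job.get_statistics(account, job_id)  # returns JobStatistics
    for stage in (stats.stages or []):                  # JobStatisticsVertexStage
        print(stage.stage_name, stage.total_progress, stage.data_read)
        vertex = stage.max_data_read_vertex             # JobStatisticsVertex or None
        if vertex is not None:
            print('  busiest vertex:', vertex.name, vertex.data_read, 'bytes')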
+
+class ScopeJobProperties(JobProperties):
+    """Scope job properties used when submitting and retrieving Scope jobs. (Only
+    for use internally with Scope job type.)
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param runtime_version: The runtime version of the Data Lake Analytics
+     engine to use for the specific type of job being run.
+    :type runtime_version: str
+    :param script: Required. The script to run. Please note that the maximum
+     script size is 3 MB.
+    :type script: str
+    :param type: Required. Constant filled by server.
+    :type type: str
+    :ivar resources: The list of resources that are required by the job.
+    :vartype resources:
+     list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource]
+    :ivar user_algebra_path: The algebra file path after the job has
+     completed.
+    :vartype user_algebra_path: str
+    :param notifier: The list of email addresses, separated by semicolons, to
+     notify when the job reaches a terminal state.
+    :type notifier: str
+    :ivar total_compilation_time: The total time this job spent compiling.
+     This value should not be set by the user and will be ignored if it is.
+    :vartype total_compilation_time: timedelta
+    :ivar total_queued_time: The total time this job spent queued. This value
+     should not be set by the user and will be ignored if it is.
+    :vartype total_queued_time: timedelta
+    :ivar total_running_time: The total time this job spent executing. This
+     value should not be set by the user and will be ignored if it is.
+    :vartype total_running_time: timedelta
+    :ivar total_paused_time: The total time this job spent paused. This value
+     should not be set by the user and will be ignored if it is.
+    :vartype total_paused_time: timedelta
+    :ivar root_process_node_id: The ID used to identify the job manager
+     coordinating job execution. This value should not be set by the user and
+     will be ignored if it is.
+    :vartype root_process_node_id: str
+    :ivar yarn_application_id: The ID used to identify the YARN application
+     executing the job. This value should not be set by the user and will be
+     ignored if it is.
+    :vartype yarn_application_id: str
+    """
+
+    _validation = {
+        'script': {'required': True},
+        'type': {'required': True},
+        'resources': {'readonly': True},
+        'user_algebra_path': {'readonly': True},
+        'total_compilation_time': {'readonly': True},
+        'total_queued_time': {'readonly': True},
+        'total_running_time': {'readonly': True},
+        'total_paused_time': {'readonly': True},
+        'root_process_node_id': {'readonly': True},
+        'yarn_application_id': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
+        'script': {'key': 'script', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'resources': {'key': 'resources', 'type': '[ScopeJobResource]'},
+        'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'},
+        'notifier': {'key': 'notifier', 'type': 'str'},
+        'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'},
+        'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'},
+        'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'},
+        'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'},
+        'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'},
+        'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'},
+    }
+
+    def __init__(self, *, script: str, runtime_version: str=None, notifier: str=None, **kwargs) -> None:
+        super(ScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs)
+        self.resources = None
+        self.user_algebra_path = None
+        self.notifier = notifier
+        self.total_compilation_time = None
+        self.total_queued_time = None
+        self.total_running_time = None
+        self.total_paused_time = None
+        self.root_process_node_id = None
+        self.yarn_application_id = None
+        self.type = 'Scope'
+
+
+class ScopeJobResource(Model):
+    """The Scope job resources. (Only for use internally with Scope job type.)
+
+    :param name: The name of the resource.
+    :type name: str
+    :param path: The path to the resource.
+    :type path: str
+    """
+
+    _attribute_map = {
+        'name': {'key': 'name', 'type': 'str'},
+        'path': {'key': 'path', 'type': 'str'},
+    }
+
+    def __init__(self, *, name: str=None, path: str=None, **kwargs) -> None:
+        super(ScopeJobResource, self).__init__(**kwargs)
+        self.name = name
+        self.path = path
+
+
+class UpdateJobParameters(Model):
+    """The parameters that can be used to update existing Data Lake Analytics job
+    information properties. (Only for use internally with Scope job type.)
+
+    :param degree_of_parallelism: The degree of parallelism used for this job.
+    :type degree_of_parallelism: int
+    :param degree_of_parallelism_percent: The degree of parallelism, expressed
+     as a percentage, used for this job.
+    :type degree_of_parallelism_percent: float
+    :param priority: The priority value for the current job. Lower numbers
+     have a higher priority. By default, a job has a priority of 1000. This
+     must be greater than 0.
+    :type priority: int
+    :param tags: The key-value pairs used to add additional metadata to the
+     job information.
+    :type tags: dict[str, str]
+    """
+
+    _attribute_map = {
+        'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'},
+        'degree_of_parallelism_percent': {'key': 'degreeOfParallelismPercent', 'type': 'float'},
+        'priority': {'key': 'priority', 'type': 'int'},
+        'tags': {'key': 'tags', 'type': '{str}'},
+    }
+
+    def __init__(self, *, degree_of_parallelism: int=None, degree_of_parallelism_percent: float=None, priority: int=None, tags=None, **kwargs) -> None:
+        super(UpdateJobParameters, self).__init__(**kwargs)
+        self.degree_of_parallelism = degree_of_parallelism
+        self.degree_of_parallelism_percent = degree_of_parallelism_percent
+        self.priority = priority
+        self.tags = tags
+
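For illustration only (this example is not part of the diff): a minimal sketch of adjusting a queued job with UpdateJobParameters, under the same hypothetical `client`, `account`, and `job_id`; the `job.update` operation is the one generated for this API version.

    from azure.mgmt.datalake.analytics.job.models import UpdateJobParameters

    # Illustrative sketch; set only the fields you want to change.
    update = UpdateJobParameters(
        degree_of_parallelism=4,
        priority=500,  # lower value means higher priority; must be greater than 0
        tags={'team': 'analytics'},
    )
    client.job.update(account, job_id, parameters=update)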
+
+class USqlJobProperties(JobProperties):
+    """U-SQL job properties used when retrieving U-SQL jobs.
+
+    Variables are only populated by the server, and will be ignored when
+    sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param runtime_version: The runtime version of the Data Lake Analytics
+     engine to use for the specific type of job being run.
+    :type runtime_version: str
+    :param script: Required. The script to run. Please note that the maximum
+     script size is 3 MB.
+    :type script: str
+    :param type: Required. Constant filled by server.
+    :type type: str
+    :ivar resources: The list of resources that are required by the job.
+    :vartype resources:
+     list[~azure.mgmt.datalake.analytics.job.models.JobResource]
+    :param statistics: The job specific statistics.
+    :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics
+    :param debug_data: The job specific debug data locations.
+    :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath
+    :ivar diagnostics: The diagnostics for the job.
+    :vartype diagnostics:
+     list[~azure.mgmt.datalake.analytics.job.models.Diagnostics]
+    :ivar algebra_file_path: The algebra file path after the job has
+     completed.
+    :vartype algebra_file_path: str
+    :ivar total_compilation_time: The total time this job spent compiling.
+     This value should not be set by the user and will be ignored if it is.
+    :vartype total_compilation_time: timedelta
+    :ivar total_queued_time: The total time this job spent queued. This value
+     should not be set by the user and will be ignored if it is.
+    :vartype total_queued_time: timedelta
+    :ivar total_running_time: The total time this job spent executing. This
+     value should not be set by the user and will be ignored if it is.
+    :vartype total_running_time: timedelta
+    :ivar total_paused_time: The total time this job spent paused. This value
+     should not be set by the user and will be ignored if it is.
+    :vartype total_paused_time: timedelta
+    :ivar root_process_node_id: The ID used to identify the job manager
+     coordinating job execution. This value should not be set by the user and
+     will be ignored if it is.
+    :vartype root_process_node_id: str
+    :ivar yarn_application_id: The ID used to identify the YARN application
+     executing the job. This value should not be set by the user and will be
+     ignored if it is.
+    :vartype yarn_application_id: str
+    :ivar yarn_application_time_stamp: The timestamp (in ticks) for the YARN
+     application executing the job. This value should not be set by the user
+     and will be ignored if it is.
+    :vartype yarn_application_time_stamp: long
+    :ivar compile_mode: The specific compilation mode for the job used during
+     execution. If this is not specified during submission, the server will
+     determine the optimal compilation mode. Possible values include:
+     'Semantic', 'Full', 'SingleBox'
+    :vartype compile_mode: str or
+     ~azure.mgmt.datalake.analytics.job.models.CompileMode
+    """
+
+    _validation = {
+        'script': {'required': True},
+        'type': {'required': True},
+        'resources': {'readonly': True},
+        'diagnostics': {'readonly': True},
+        'algebra_file_path': {'readonly': True},
+        'total_compilation_time': {'readonly': True},
+        'total_queued_time': {'readonly': True},
+        'total_running_time': {'readonly': True},
+        'total_paused_time': {'readonly': True},
+        'root_process_node_id': {'readonly': True},
+        'yarn_application_id': {'readonly': True},
+        'yarn_application_time_stamp': {'readonly': True},
+        'compile_mode': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
+        'script': {'key': 'script', 'type': 'str'},
+        'type': {'key': 'type', 'type': 'str'},
+        'resources': {'key': 'resources', 'type': '[JobResource]'},
+        'statistics': {'key': 'statistics', 'type': 'JobStatistics'},
+        'debug_data': {'key': 'debugData', 'type': 'JobDataPath'},
+        'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'},
+        'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'},
+        'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'},
+        'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'},
+        'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'},
+        'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'},
+        'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'},
+        'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'},
+        'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'},
+        'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'},
+    }
+
+    def __init__(self, *, script: str, runtime_version: str=None, statistics=None, debug_data=None, **kwargs) -> None:
+        super(USqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs)
+        self.resources = None
+        self.statistics = statistics
+        self.debug_data = debug_data
+        self.diagnostics = None
+        self.algebra_file_path = None
+        self.total_compilation_time = None
+        self.total_queued_time = None
+        self.total_running_time = None
+        self.total_paused_time = None
+        self.root_process_node_id = None
+        self.yarn_application_id = None
+        self.yarn_application_time_stamp = None
+        self.compile_mode = None
+        self.type = 'USql'
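For illustration only (this example is not part of the diff): a minimal end-to-end sketch that ties the models above together by submitting a U-SQL job. It assumes `credentials` is an authenticated msrestazure credentials object and 'myaccount' an existing Data Lake Analytics account; the client constructor and `job.create` call follow the client generated in this package, and the script and names are placeholders.

    import uuid

    from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient
    from azure.mgmt.datalake.analytics.job.models import (
        CreateJobParameters, CreateUSqlJobProperties, JobRelationshipProperties,
        JobType)

    # Illustrative sketch; `credentials` is assumed to exist.
    client = DataLakeAnalyticsJobManagementClient(credentials, 'azuredatalakeanalytics.net')

    script = '@rows = SELECT * FROM (VALUES (1)) AS T(a); OUTPUT @rows TO "/output/rows.csv" USING Outputters.Csv();'
    parameters = CreateJobParameters(
        type=JobType.usql,
        properties=CreateUSqlJobProperties(script=script),
        name='example job',
        degree_of_parallelism=1,
        # Optional: correlate this run with other occurrences of the same script.
        related=JobRelationshipProperties(recurrence_id=str(uuid.uuid4())),
    )
    job = client.job.create('myaccount', str(uuid.uuid4()), parameters)
    print(job.job_id, job.state)

diff --git 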
a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_paged_models.py similarity index 50% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_paged.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_paged_models.py index ead66eb0c359..324309d7b7f7 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_paged.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/_paged_models.py @@ -12,6 +12,32 @@ from msrest.paging import Paged +class JobInformationBasicPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobInformationBasic ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobInformationBasic]'} + } + + def __init__(self, *args, **kwargs): + + super(JobInformationBasicPaged, self).__init__(*args, **kwargs) +class JobPipelineInformationPaged(Paged): + """ + A paging container for iterating over a list of :class:`JobPipelineInformation ` object + """ + + _attribute_map = { + 'next_link': {'key': 'nextLink', 'type': 'str'}, + 'current_page': {'key': 'value', 'type': '[JobPipelineInformation]'} + } + + def __init__(self, *args, **kwargs): + + super(JobPipelineInformationPaged, self).__init__(*args, **kwargs) class JobRecurrenceInformationPaged(Paged): """ A paging container for iterating over a list of :class:`JobRecurrenceInformation ` object diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py deleted file mode 100644 index cb184b099a95..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class BaseJobParameters(Model): - """Data Lake Analytics Job Parameters base class for build and submit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. 
- :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - } - - def __init__(self, **kwargs): - super(BaseJobParameters, self).__init__(**kwargs) - self.type = kwargs.get('type', None) - self.properties = kwargs.get('properties', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py deleted file mode 100644 index df4ee0146392..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/base_job_parameters_py3.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class BaseJobParameters(Model): - """Data Lake Analytics Job Parameters base class for build and submit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - } - - def __init__(self, *, type, properties, **kwargs) -> None: - super(BaseJobParameters, self).__init__(**kwargs) - self.type = type - self.properties = properties diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py deleted file mode 100644 index 7e96058ad235..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .base_job_parameters import BaseJobParameters - - -class BuildJobParameters(BaseJobParameters): - """The parameters used to build a new Data Lake Analytics job. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: The friendly name of the job to build. - :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(BuildJobParameters, self).__init__(**kwargs) - self.name = kwargs.get('name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py deleted file mode 100644 index 6f9e91fba271..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/build_job_parameters_py3.py +++ /dev/null @@ -1,44 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .base_job_parameters_py3 import BaseJobParameters - - -class BuildJobParameters(BaseJobParameters): - """The parameters used to build a new Data Lake Analytics job. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: The friendly name of the job to build. 
- :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - def __init__(self, *, type, properties, name: str=None, **kwargs) -> None: - super(BuildJobParameters, self).__init__(type=type, properties=properties, **kwargs) - self.name = name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py deleted file mode 100644 index 04bf0a5d7580..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .base_job_parameters import BaseJobParameters - - -class CreateJobParameters(BaseJobParameters): - """The parameters used to submit a new Data Lake Analytics job. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: Required. The friendly name of the job to submit. - :type name: str - :param degree_of_parallelism: The degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value to use for the current job. Lower - numbers have a higher priority. By default, a job has a priority of 1000. - This must be greater than 0. - :type priority: int - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. 
- :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - } - - def __init__(self, **kwargs): - super(CreateJobParameters, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) - self.priority = kwargs.get('priority', None) - self.log_file_patterns = kwargs.get('log_file_patterns', None) - self.related = kwargs.get('related', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py deleted file mode 100644 index 006f18d9fbe1..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_parameters_py3.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .base_job_parameters_py3 import BaseJobParameters - - -class CreateJobParameters(BaseJobParameters): - """The parameters used to submit a new Data Lake Analytics job. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: Required. The friendly name of the job to submit. - :type name: str - :param degree_of_parallelism: The degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value to use for the current job. Lower - numbers have a higher priority. By default, a job has a priority of 1000. - This must be greater than 0. - :type priority: int - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. 
- :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - } - - def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, **kwargs) -> None: - super(CreateJobParameters, self).__init__(type=type, properties=properties, **kwargs) - self.name = name - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority - self.log_file_patterns = log_file_patterns - self.related = related diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py deleted file mode 100644 index 60960f47bd98..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class CreateJobProperties(Model): - """The common Data Lake Analytics job properties for job submission. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. 
- :type type: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} - } - - def __init__(self, **kwargs): - super(CreateJobProperties, self).__init__(**kwargs) - self.runtime_version = kwargs.get('runtime_version', None) - self.script = kwargs.get('script', None) - self.type = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py deleted file mode 100644 index 8e1e9942743b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_job_properties_py3.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class CreateJobProperties(Model): - """The common Data Lake Analytics job properties for job submission. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CreateUSqlJobProperties, CreateScopeJobProperties - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'USql': 'CreateUSqlJobProperties', 'Scope': 'CreateScopeJobProperties'} - } - - def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: - super(CreateJobProperties, self).__init__(**kwargs) - self.runtime_version = runtime_version - self.script = script - self.type = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py deleted file mode 100644 index bf021107964a..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_parameters import CreateJobParameters - - -class CreateScopeJobParameters(CreateJobParameters): - """The parameters used to submit a new Data Lake Analytics Scope job. (Only - for use internally with Scope job type.). - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: Required. The friendly name of the job to submit. - :type name: str - :param degree_of_parallelism: The degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value to use for the current job. Lower - numbers have a higher priority. By default, a job has a priority of 1000. - This must be greater than 0. - :type priority: int - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. - :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. - :type tags: dict[str, str] - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(CreateScopeJobParameters, self).__init__(**kwargs) - self.tags = kwargs.get('tags', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py deleted file mode 100644 index 83f273d86478..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_parameters_py3.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_parameters_py3 import CreateJobParameters - - -class CreateScopeJobParameters(CreateJobParameters): - """The parameters used to submit a new Data Lake Analytics Scope job. (Only - for use internally with Scope job type.). - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :param properties: Required. The job specific properties. - :type properties: - ~azure.mgmt.datalake.analytics.job.models.CreateJobProperties - :param name: Required. The friendly name of the job to submit. - :type name: str - :param degree_of_parallelism: The degree of parallelism to use for this - job. This must be greater than 0, if set to less than 0 it will default to - 1. Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value to use for the current job. Lower - numbers have a higher priority. By default, a job has a priority of 1000. - This must be greater than 0. - :type priority: int - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. - :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. - :type tags: dict[str, str] - """ - - _validation = { - 'type': {'required': True}, - 'properties': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': 'type', 'type': 'JobType'}, - 'properties': {'key': 'properties', 'type': 'CreateJobProperties'}, - 'name': {'key': 'name', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, *, type, properties, name: str, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: - super(CreateScopeJobParameters, self).__init__(type=type, properties=properties, name=name, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, **kwargs) - self.tags = tags diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py deleted file mode 100644 index 3c720218ff2d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. 
See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_properties import CreateJobProperties - - -class CreateScopeJobProperties(CreateJobProperties): - """Scope job properties used when submitting Scope jobs. (Only for use - internally with Scope job type.). - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :param resources: The list of resources that are required by the job. - :type resources: - list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :param notifier: The list of email addresses, separated by semi-colons, to - notify when the job reaches a terminal state. - :type notifier: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, - 'notifier': {'key': 'notifier', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(CreateScopeJobProperties, self).__init__(**kwargs) - self.resources = kwargs.get('resources', None) - self.notifier = kwargs.get('notifier', None) - self.type = 'Scope' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py deleted file mode 100644 index e67a033dd8cd..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_scope_job_properties_py3.py +++ /dev/null @@ -1,54 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_properties_py3 import CreateJobProperties - - -class CreateScopeJobProperties(CreateJobProperties): - """Scope job properties used when submitting Scope jobs. (Only for use - internally with Scope job type.). - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :param resources: The list of resources that are required by the job. 
- :type resources: - list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :param notifier: The list of email addresses, separated by semi-colons, to - notify when the job reaches a terminal state. - :type notifier: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, - 'notifier': {'key': 'notifier', 'type': 'str'}, - } - - def __init__(self, *, script: str, runtime_version: str=None, resources=None, notifier: str=None, **kwargs) -> None: - super(CreateScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) - self.resources = resources - self.notifier = notifier - self.type = 'Scope' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py deleted file mode 100644 index b2a399714c82..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_properties import CreateJobProperties - - -class CreateUSqlJobProperties(CreateJobProperties): - """U-SQL job properties used when submitting U-SQL jobs. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :param compile_mode: The specific compilation mode for the job used during - execution. If this is not specified during submission, the server will - determine the optimal compilation mode. 
Possible values include: - 'Semantic', 'Full', 'SingleBox' - :type compile_mode: str or - ~azure.mgmt.datalake.analytics.job.models.CompileMode - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, - } - - def __init__(self, **kwargs): - super(CreateUSqlJobProperties, self).__init__(**kwargs) - self.compile_mode = kwargs.get('compile_mode', None) - self.type = 'USql' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py deleted file mode 100644 index 946ada0909e5..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/create_usql_job_properties_py3.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .create_job_properties_py3 import CreateJobProperties - - -class CreateUSqlJobProperties(CreateJobProperties): - """U-SQL job properties used when submitting U-SQL jobs. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :param compile_mode: The specific compilation mode for the job used during - execution. If this is not specified during submission, the server will - determine the optimal compilation mode. 
Possible values include: - 'Semantic', 'Full', 'SingleBox' - :type compile_mode: str or - ~azure.mgmt.datalake.analytics.job.models.CompileMode - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, - } - - def __init__(self, *, script: str, runtime_version: str=None, compile_mode=None, **kwargs) -> None: - super(CreateUSqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) - self.compile_mode = compile_mode - self.type = 'USql' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py deleted file mode 100644 index bdf4b3834333..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Diagnostics(Model): - """Error diagnostic information for failed jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar message: The error message. - :vartype message: str - :ivar severity: The severity of the error. Possible values include: - 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar line_number: The line number the error occured on. - :vartype line_number: int - :ivar column_number: The column where the error occured. - :vartype column_number: int - :ivar start: The starting index of the error. - :vartype start: int - :ivar end: The ending index of the error. 
- :vartype end: int - """ - - _validation = { - 'message': {'readonly': True}, - 'severity': {'readonly': True}, - 'line_number': {'readonly': True}, - 'column_number': {'readonly': True}, - 'start': {'readonly': True}, - 'end': {'readonly': True}, - } - - _attribute_map = { - 'message': {'key': 'message', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, - 'column_number': {'key': 'columnNumber', 'type': 'int'}, - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(Diagnostics, self).__init__(**kwargs) - self.message = None - self.severity = None - self.line_number = None - self.column_number = None - self.start = None - self.end = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py deleted file mode 100644 index 3887cba9542a..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/diagnostics_py3.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class Diagnostics(Model): - """Error diagnostic information for failed jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar message: The error message. - :vartype message: str - :ivar severity: The severity of the error. Possible values include: - 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar line_number: The line number the error occured on. - :vartype line_number: int - :ivar column_number: The column where the error occured. - :vartype column_number: int - :ivar start: The starting index of the error. - :vartype start: int - :ivar end: The ending index of the error. 
- :vartype end: int - """ - - _validation = { - 'message': {'readonly': True}, - 'severity': {'readonly': True}, - 'line_number': {'readonly': True}, - 'column_number': {'readonly': True}, - 'start': {'readonly': True}, - 'end': {'readonly': True}, - } - - _attribute_map = { - 'message': {'key': 'message', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, - 'column_number': {'key': 'columnNumber', 'type': 'int'}, - 'start': {'key': 'start', 'type': 'int'}, - 'end': {'key': 'end', 'type': 'int'}, - } - - def __init__(self, **kwargs) -> None: - super(Diagnostics, self).__init__(**kwargs) - self.message = None - self.severity = None - self.line_number = None - self.column_number = None - self.start = None - self.end = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py deleted file mode 100644 index 5e8a85f0f4fd..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties import JobProperties - - -class HiveJobProperties(JobProperties): - """Hive job properties used when retrieving Hive jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar logs_location: The Hive logs location. - :vartype logs_location: str - :ivar output_location: The location of Hive job output files (both - execution output and results). - :vartype output_location: str - :ivar statement_count: The number of statements that will be run based on - the script. - :vartype statement_count: int - :ivar executed_statement_count: The number of statements that have been - run based on the script. 
- :vartype executed_statement_count: int - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'logs_location': {'readonly': True}, - 'output_location': {'readonly': True}, - 'statement_count': {'readonly': True}, - 'executed_statement_count': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'logs_location': {'key': 'logsLocation', 'type': 'str'}, - 'output_location': {'key': 'outputLocation', 'type': 'str'}, - 'statement_count': {'key': 'statementCount', 'type': 'int'}, - 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(HiveJobProperties, self).__init__(**kwargs) - self.logs_location = None - self.output_location = None - self.statement_count = None - self.executed_statement_count = None - self.type = 'Hive' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py deleted file mode 100644 index 8660637ca460..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/hive_job_properties_py3.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties_py3 import JobProperties - - -class HiveJobProperties(JobProperties): - """Hive job properties used when retrieving Hive jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar logs_location: The Hive logs location. - :vartype logs_location: str - :ivar output_location: The location of Hive job output files (both - execution output and results). - :vartype output_location: str - :ivar statement_count: The number of statements that will be run based on - the script. - :vartype statement_count: int - :ivar executed_statement_count: The number of statements that have been - run based on the script. 
- :vartype executed_statement_count: int - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'logs_location': {'readonly': True}, - 'output_location': {'readonly': True}, - 'statement_count': {'readonly': True}, - 'executed_statement_count': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'logs_location': {'key': 'logsLocation', 'type': 'str'}, - 'output_location': {'key': 'outputLocation', 'type': 'str'}, - 'statement_count': {'key': 'statementCount', 'type': 'int'}, - 'executed_statement_count': {'key': 'executedStatementCount', 'type': 'int'}, - } - - def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: - super(HiveJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) - self.logs_location = None - self.output_location = None - self.statement_count = None - self.executed_statement_count = None - self.type = 'Hive' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py deleted file mode 100644 index 1ec4c66441d3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobDataPath(Model): - """A Data Lake Analytics job data path item. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar job_id: The ID of the job this data is for. - :vartype job_id: str - :ivar command: The command that this job data relates to. - :vartype command: str - :ivar paths: The list of paths to all of the job data. - :vartype paths: list[str] - """ - - _validation = { - 'job_id': {'readonly': True}, - 'command': {'readonly': True}, - 'paths': {'readonly': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'command': {'key': 'command', 'type': 'str'}, - 'paths': {'key': 'paths', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(JobDataPath, self).__init__(**kwargs) - self.job_id = None - self.command = None - self.paths = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py deleted file mode 100644 index 08f8667caf49..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_data_path_py3.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
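Note (review): the per-file model modules deleted above are part of the move to a single consolidated models package; the classes themselves (CreateJobParameters, CreateUSqlJobProperties, HiveJobProperties, JobDataPath, and the rest) should still be exported from azure.mgmt.datalake.analytics.job.models. A minimal job-submission sketch under that assumption; the account name, credential values, and DNS suffix are placeholders, and the JobOperations method names are assumed to be unchanged by this refactor:

    import uuid

    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient
    from azure.mgmt.datalake.analytics.job.models import (
        CreateJobParameters,
        CreateUSqlJobProperties,
    )

    # Placeholder credentials; any msrestazure credentials object should work here.
    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')

    job_client = DataLakeAnalyticsJobManagementClient(
        credentials, 'azuredatalakeanalytics.net')

    parameters = CreateJobParameters(
        type='USql',                      # str or JobType enum, per the docstring above
        name='hello-adla',                # required friendly name
        degree_of_parallelism=1,          # values below 0 fall back to 1 per the docstring
        properties=CreateUSqlJobProperties(
            script='@rows = SELECT * FROM (VALUES (1)) AS T(x);\n'
                   'OUTPUT @rows TO "/output/hello.csv" USING Outputters.Csv();',
        ),
    )

    job = job_client.job.create('myadlaaccount', str(uuid.uuid4()), parameters)
    print(job.job_id, job.state)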
-# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobDataPath(Model): - """A Data Lake Analytics job data path item. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar job_id: The ID of the job this data is for. - :vartype job_id: str - :ivar command: The command that this job data relates to. - :vartype command: str - :ivar paths: The list of paths to all of the job data. - :vartype paths: list[str] - """ - - _validation = { - 'job_id': {'readonly': True}, - 'command': {'readonly': True}, - 'paths': {'readonly': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'command': {'key': 'command', 'type': 'str'}, - 'paths': {'key': 'paths', 'type': '[str]'}, - } - - def __init__(self, **kwargs) -> None: - super(JobDataPath, self).__init__(**kwargs) - self.job_id = None - self.command = None - self.paths = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py deleted file mode 100644 index c72f9491bc9d..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobErrorDetails(Model): - """The Data Lake Analytics job error details. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar error_id: The specific identifier for the type of error encountered - in the job. - :vartype error_id: str - :ivar severity: The severity level of the failure. Possible values - include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', - 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar source: The ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar message: The user friendly error message for the failure. - :vartype message: str - :ivar description: The error message description. - :vartype description: str - :ivar details: The details of the error message. - :vartype details: str - :ivar line_number: The specific line number in the job where the error - occured. - :vartype line_number: int - :ivar start_offset: The start offset in the job where the error was found - :vartype start_offset: int - :ivar end_offset: The end offset in the job where the error was found. - :vartype end_offset: int - :ivar resolution: The recommended resolution for the failure, if any. - :vartype resolution: str - :ivar file_path: The path to any supplemental error files, if any. 
- :vartype file_path: str - :ivar help_link: The link to MSDN or Azure help for this type of error, if - any. - :vartype help_link: str - :ivar internal_diagnostics: The internal diagnostic stack trace if the - user requesting the job error details has sufficient permissions it will - be retrieved, otherwise it will be empty. - :vartype internal_diagnostics: str - :ivar inner_error: The inner error of this specific job error message, if - any. - :vartype inner_error: - ~azure.mgmt.datalake.analytics.job.models.JobInnerError - """ - - _validation = { - 'error_id': {'readonly': True}, - 'severity': {'readonly': True}, - 'source': {'readonly': True}, - 'message': {'readonly': True}, - 'description': {'readonly': True}, - 'details': {'readonly': True}, - 'line_number': {'readonly': True}, - 'start_offset': {'readonly': True}, - 'end_offset': {'readonly': True}, - 'resolution': {'readonly': True}, - 'file_path': {'readonly': True}, - 'help_link': {'readonly': True}, - 'internal_diagnostics': {'readonly': True}, - 'inner_error': {'readonly': True}, - } - - _attribute_map = { - 'error_id': {'key': 'errorId', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'source': {'key': 'source', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'file_path': {'key': 'filePath', 'type': 'str'}, - 'help_link': {'key': 'helpLink', 'type': 'str'}, - 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, - } - - def __init__(self, **kwargs): - super(JobErrorDetails, self).__init__(**kwargs) - self.error_id = None - self.severity = None - self.source = None - self.message = None - self.description = None - self.details = None - self.line_number = None - self.start_offset = None - self.end_offset = None - self.resolution = None - self.file_path = None - self.help_link = None - self.internal_diagnostics = None - self.inner_error = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py deleted file mode 100644 index 6dbffa254dd3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_error_details_py3.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobErrorDetails(Model): - """The Data Lake Analytics job error details. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar error_id: The specific identifier for the type of error encountered - in the job. 
- :vartype error_id: str - :ivar severity: The severity level of the failure. Possible values - include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', - 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar source: The ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar message: The user friendly error message for the failure. - :vartype message: str - :ivar description: The error message description. - :vartype description: str - :ivar details: The details of the error message. - :vartype details: str - :ivar line_number: The specific line number in the job where the error - occured. - :vartype line_number: int - :ivar start_offset: The start offset in the job where the error was found - :vartype start_offset: int - :ivar end_offset: The end offset in the job where the error was found. - :vartype end_offset: int - :ivar resolution: The recommended resolution for the failure, if any. - :vartype resolution: str - :ivar file_path: The path to any supplemental error files, if any. - :vartype file_path: str - :ivar help_link: The link to MSDN or Azure help for this type of error, if - any. - :vartype help_link: str - :ivar internal_diagnostics: The internal diagnostic stack trace if the - user requesting the job error details has sufficient permissions it will - be retrieved, otherwise it will be empty. - :vartype internal_diagnostics: str - :ivar inner_error: The inner error of this specific job error message, if - any. - :vartype inner_error: - ~azure.mgmt.datalake.analytics.job.models.JobInnerError - """ - - _validation = { - 'error_id': {'readonly': True}, - 'severity': {'readonly': True}, - 'source': {'readonly': True}, - 'message': {'readonly': True}, - 'description': {'readonly': True}, - 'details': {'readonly': True}, - 'line_number': {'readonly': True}, - 'start_offset': {'readonly': True}, - 'end_offset': {'readonly': True}, - 'resolution': {'readonly': True}, - 'file_path': {'readonly': True}, - 'help_link': {'readonly': True}, - 'internal_diagnostics': {'readonly': True}, - 'inner_error': {'readonly': True}, - } - - _attribute_map = { - 'error_id': {'key': 'errorId', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'source': {'key': 'source', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'line_number': {'key': 'lineNumber', 'type': 'int'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'file_path': {'key': 'filePath', 'type': 'str'}, - 'help_link': {'key': 'helpLink', 'type': 'str'}, - 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, - } - - def __init__(self, **kwargs) -> None: - super(JobErrorDetails, self).__init__(**kwargs) - self.error_id = None - self.severity = None - self.source = None - self.message = None - self.description = None - self.details = None - self.line_number = None - self.start_offset = None - self.end_offset = None - self.resolution = None - self.file_path = None - self.help_link = None - self.internal_diagnostics = None - self.inner_error = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py 
b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py deleted file mode 100644 index 72dd9118ae72..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information.py +++ /dev/null @@ -1,124 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_information_basic import JobInformationBasic - - -class JobInformation(JobInformationBasic): - """The extended Data Lake Analytics job information properties returned when - retrieving a specific job. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar job_id: The job's unique identifier (a GUID). - :vartype job_id: str - :param name: Required. The friendly name of the job. - :type name: str - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: The user or account that submitted the job. - :vartype submitter: str - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :ivar submit_time: The time the job was submitted to the service. - :vartype submit_time: datetime - :ivar start_time: The start time of the job. - :vartype start_time: datetime - :ivar end_time: The completion time of the job. - :vartype end_time: datetime - :ivar state: The job state. When the job is in the Ended state, refer to - Result and ErrorMessage for details. Possible values include: 'Accepted', - 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' - :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: The result of job execution or the current result of the - running job. Possible values include: 'None', 'Succeeded', 'Cancelled', - 'Failed' - :vartype result: str or - ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: The log folder path to use in the following format: - adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. - :vartype log_folder: str - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. 
- :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) - :type tags: dict[str, str] - :ivar error_message: The error message details for the job, if the job - failed. - :vartype error_message: - list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] - :ivar state_audit_records: The job state audit records, indicating when - various operations have been performed on this job. - :vartype state_audit_records: - list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] - :param properties: Required. The job specific properties. - :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties - """ - - _validation = { - 'job_id': {'readonly': True}, - 'name': {'required': True}, - 'type': {'required': True}, - 'submitter': {'readonly': True}, - 'submit_time': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'state': {'readonly': True}, - 'result': {'readonly': True}, - 'log_folder': {'readonly': True}, - 'error_message': {'readonly': True}, - 'state_audit_records': {'readonly': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobType'}, - 'submitter': {'key': 'submitter', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'result': {'key': 'result', 'type': 'JobResult'}, - 'log_folder': {'key': 'logFolder', 'type': 'str'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, - 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, - 'properties': {'key': 'properties', 'type': 'JobProperties'}, - } - - def __init__(self, **kwargs): - super(JobInformation, self).__init__(**kwargs) - self.error_message = None - self.state_audit_records = None - self.properties = kwargs.get('properties', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py deleted file mode 100644 index 8b05115a2c66..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
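Note (review): the _attribute_map tables above are what drives msrest serialization, mapping the snake_case Python attributes to the camelCase wire names (jobId, degreeOfParallelism, logFilePatterns, and so on). A quick way to see the mapping, assuming msrest's standard Model.serialize(); output shown approximately:

    from azure.mgmt.datalake.analytics.job.models import JobInformationBasic

    info = JobInformationBasic(name='nightly-usql', type='USql', priority=100)

    # Read-only, server-populated attributes (job_id, state, ...) are None and
    # are omitted; writable attributes come out under their camelCase keys.
    print(info.serialize())
    # -> {'name': 'nightly-usql', 'type': 'USql', 'degreeOfParallelism': 1, 'priority': 100}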
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobInformationBasic(Model): - """The common Data Lake Analytics job information properties. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar job_id: The job's unique identifier (a GUID). - :vartype job_id: str - :param name: Required. The friendly name of the job. - :type name: str - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: The user or account that submitted the job. - :vartype submitter: str - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :ivar submit_time: The time the job was submitted to the service. - :vartype submit_time: datetime - :ivar start_time: The start time of the job. - :vartype start_time: datetime - :ivar end_time: The completion time of the job. - :vartype end_time: datetime - :ivar state: The job state. When the job is in the Ended state, refer to - Result and ErrorMessage for details. Possible values include: 'Accepted', - 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' - :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: The result of job execution or the current result of the - running job. Possible values include: 'None', 'Succeeded', 'Cancelled', - 'Failed' - :vartype result: str or - ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: The log folder path to use in the following format: - adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. - :vartype log_folder: str - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. - :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) 
- :type tags: dict[str, str] - """ - - _validation = { - 'job_id': {'readonly': True}, - 'name': {'required': True}, - 'type': {'required': True}, - 'submitter': {'readonly': True}, - 'submit_time': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'state': {'readonly': True}, - 'result': {'readonly': True}, - 'log_folder': {'readonly': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobType'}, - 'submitter': {'key': 'submitter', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'result': {'key': 'result', 'type': 'JobResult'}, - 'log_folder': {'key': 'logFolder', 'type': 'str'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(JobInformationBasic, self).__init__(**kwargs) - self.job_id = None - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.submitter = None - self.degree_of_parallelism = kwargs.get('degree_of_parallelism', 1) - self.priority = kwargs.get('priority', None) - self.submit_time = None - self.start_time = None - self.end_time = None - self.state = None - self.result = None - self.log_folder = None - self.log_file_patterns = kwargs.get('log_file_patterns', None) - self.related = kwargs.get('related', None) - self.tags = kwargs.get('tags', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_paged.py deleted file mode 100644 index 85a36a8c5510..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
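Note (review): JobInformationBasic is what the list operation returns per item. Continuing the submission sketch above, and assuming JobOperations.list keeps its current signature:

    # Each item in the listing is a JobInformationBasic; the extended fields
    # (error_message, state_audit_records, properties) require a job.get call.
    for basic in job_client.job.list('myadlaaccount'):
        print(basic.job_id, basic.name, basic.state, basic.result)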
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class JobInformationBasicPaged(Paged): - """ - A paging container for iterating over a list of :class:`JobInformationBasic ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[JobInformationBasic]'} - } - - def __init__(self, *args, **kwargs): - - super(JobInformationBasicPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py deleted file mode 100644 index 9bf3be4006c6..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_basic_py3.py +++ /dev/null @@ -1,119 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobInformationBasic(Model): - """The common Data Lake Analytics job information properties. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar job_id: The job's unique identifier (a GUID). - :vartype job_id: str - :param name: Required. The friendly name of the job. - :type name: str - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: The user or account that submitted the job. - :vartype submitter: str - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :ivar submit_time: The time the job was submitted to the service. - :vartype submit_time: datetime - :ivar start_time: The start time of the job. - :vartype start_time: datetime - :ivar end_time: The completion time of the job. - :vartype end_time: datetime - :ivar state: The job state. When the job is in the Ended state, refer to - Result and ErrorMessage for details. Possible values include: 'Accepted', - 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' - :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: The result of job execution or the current result of the - running job. 
Possible values include: 'None', 'Succeeded', 'Cancelled', - 'Failed' - :vartype result: str or - ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: The log folder path to use in the following format: - adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. - :vartype log_folder: str - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. - :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) - :type tags: dict[str, str] - """ - - _validation = { - 'job_id': {'readonly': True}, - 'name': {'required': True}, - 'type': {'required': True}, - 'submitter': {'readonly': True}, - 'submit_time': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'state': {'readonly': True}, - 'result': {'readonly': True}, - 'log_folder': {'readonly': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobType'}, - 'submitter': {'key': 'submitter', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'result': {'key': 'result', 'type': 'JobResult'}, - 'log_folder': {'key': 'logFolder', 'type': 'str'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, *, name: str, type, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: - super(JobInformationBasic, self).__init__(**kwargs) - self.job_id = None - self.name = name - self.type = type - self.submitter = None - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority - self.submit_time = None - self.start_time = None - self.end_time = None - self.state = None - self.result = None - self.log_folder = None - self.log_file_patterns = log_file_patterns - self.related = related - self.tags = tags diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py deleted file mode 100644 index 367249c602f3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_information_py3.py +++ /dev/null @@ -1,124 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
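Note (review): the paged container deleted just above (JobInformationBasicPaged) follows nextLink transparently during iteration, so callers rarely touch it directly; the extended JobInformation comes from fetching a single job. A sketch, again assuming the current JobOperations.get signature and reusing the client from the earlier snippet:

    retrieved = job_client.job.get('myadlaaccount', job.job_id)

    # JobResult is a str-based enum, so a plain string comparison is fine.
    if retrieved.result == 'Failed':
        for err in retrieved.error_message or []:
            print(err.severity, err.message, err.resolution)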
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_information_basic_py3 import JobInformationBasic - - -class JobInformation(JobInformationBasic): - """The extended Data Lake Analytics job information properties returned when - retrieving a specific job. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar job_id: The job's unique identifier (a GUID). - :vartype job_id: str - :param name: Required. The friendly name of the job. - :type name: str - :param type: Required. The job type of the current job (Hive, USql, or - Scope (for internal use only)). Possible values include: 'USql', 'Hive', - 'Scope' - :type type: str or ~azure.mgmt.datalake.analytics.job.models.JobType - :ivar submitter: The user or account that submitted the job. - :vartype submitter: str - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - Default value: 1 . - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :ivar submit_time: The time the job was submitted to the service. - :vartype submit_time: datetime - :ivar start_time: The start time of the job. - :vartype start_time: datetime - :ivar end_time: The completion time of the job. - :vartype end_time: datetime - :ivar state: The job state. When the job is in the Ended state, refer to - Result and ErrorMessage for details. Possible values include: 'Accepted', - 'Compiling', 'Ended', 'New', 'Queued', 'Running', 'Scheduling', - 'Starting', 'Paused', 'WaitingForCapacity' - :vartype state: str or ~azure.mgmt.datalake.analytics.job.models.JobState - :ivar result: The result of job execution or the current result of the - running job. Possible values include: 'None', 'Succeeded', 'Cancelled', - 'Failed' - :vartype result: str or - ~azure.mgmt.datalake.analytics.job.models.JobResult - :ivar log_folder: The log folder path to use in the following format: - adl://.azuredatalakestore.net/system/jobservice/jobs/Usql/2016/03/13/17/18/5fe51957-93bc-4de0-8ddc-c5a4753b068b/logs/. - :vartype log_folder: str - :param log_file_patterns: The list of log file name patterns to find in - the logFolder. '*' is the only matching character allowed. Example format: - jobExecution*.log or *mylog*.txt - :type log_file_patterns: list[str] - :param related: The recurring job relationship information properties. - :type related: - ~azure.mgmt.datalake.analytics.job.models.JobRelationshipProperties - :param tags: The key-value pairs used to add additional metadata to the - job information. (Only for use internally with Scope job type.) - :type tags: dict[str, str] - :ivar error_message: The error message details for the job, if the job - failed. - :vartype error_message: - list[~azure.mgmt.datalake.analytics.job.models.JobErrorDetails] - :ivar state_audit_records: The job state audit records, indicating when - various operations have been performed on this job. - :vartype state_audit_records: - list[~azure.mgmt.datalake.analytics.job.models.JobStateAuditRecord] - :param properties: Required. The job specific properties. 
- :type properties: ~azure.mgmt.datalake.analytics.job.models.JobProperties - """ - - _validation = { - 'job_id': {'readonly': True}, - 'name': {'required': True}, - 'type': {'required': True}, - 'submitter': {'readonly': True}, - 'submit_time': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'state': {'readonly': True}, - 'result': {'readonly': True}, - 'log_folder': {'readonly': True}, - 'error_message': {'readonly': True}, - 'state_audit_records': {'readonly': True}, - 'properties': {'required': True}, - } - - _attribute_map = { - 'job_id': {'key': 'jobId', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobType'}, - 'submitter': {'key': 'submitter', 'type': 'str'}, - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'submit_time': {'key': 'submitTime', 'type': 'iso-8601'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'state': {'key': 'state', 'type': 'JobState'}, - 'result': {'key': 'result', 'type': 'JobResult'}, - 'log_folder': {'key': 'logFolder', 'type': 'str'}, - 'log_file_patterns': {'key': 'logFilePatterns', 'type': '[str]'}, - 'related': {'key': 'related', 'type': 'JobRelationshipProperties'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'error_message': {'key': 'errorMessage', 'type': '[JobErrorDetails]'}, - 'state_audit_records': {'key': 'stateAuditRecords', 'type': '[JobStateAuditRecord]'}, - 'properties': {'key': 'properties', 'type': 'JobProperties'}, - } - - def __init__(self, *, name: str, type, properties, degree_of_parallelism: int=1, priority: int=None, log_file_patterns=None, related=None, tags=None, **kwargs) -> None: - super(JobInformation, self).__init__(name=name, type=type, degree_of_parallelism=degree_of_parallelism, priority=priority, log_file_patterns=log_file_patterns, related=related, tags=tags, **kwargs) - self.error_message = None - self.state_audit_records = None - self.properties = properties diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py deleted file mode 100644 index ebf20a919050..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobInnerError(Model): - """The Data Lake Analytics job error details. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar error_id: The specific identifier for the type of error encountered - in the job. - :vartype error_id: str - :ivar severity: The severity level of the failure. 
Possible values - include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', - 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar source: The ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar message: The user friendly error message for the failure. - :vartype message: str - :ivar description: The error message description. - :vartype description: str - :ivar details: The details of the error message. - :vartype details: str - :ivar diagnostic_code: The diagnostic error code. - :vartype diagnostic_code: int - :ivar component: The component that failed. - :vartype component: str - :ivar resolution: The recommended resolution for the failure, if any. - :vartype resolution: str - :ivar help_link: The link to MSDN or Azure help for this type of error, if - any. - :vartype help_link: str - :ivar internal_diagnostics: The internal diagnostic stack trace if the - user requesting the job error details has sufficient permissions it will - be retrieved, otherwise it will be empty. - :vartype internal_diagnostics: str - :ivar inner_error: The inner error of this specific job error message, if - any. - :vartype inner_error: - ~azure.mgmt.datalake.analytics.job.models.JobInnerError - """ - - _validation = { - 'error_id': {'readonly': True}, - 'severity': {'readonly': True}, - 'source': {'readonly': True}, - 'message': {'readonly': True}, - 'description': {'readonly': True}, - 'details': {'readonly': True}, - 'diagnostic_code': {'readonly': True}, - 'component': {'readonly': True}, - 'resolution': {'readonly': True}, - 'help_link': {'readonly': True}, - 'internal_diagnostics': {'readonly': True}, - 'inner_error': {'readonly': True}, - } - - _attribute_map = { - 'error_id': {'key': 'errorId', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'source': {'key': 'source', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, - 'component': {'key': 'component', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'help_link': {'key': 'helpLink', 'type': 'str'}, - 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, - } - - def __init__(self, **kwargs): - super(JobInnerError, self).__init__(**kwargs) - self.error_id = None - self.severity = None - self.source = None - self.message = None - self.description = None - self.details = None - self.diagnostic_code = None - self.component = None - self.resolution = None - self.help_link = None - self.internal_diagnostics = None - self.inner_error = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py deleted file mode 100644 index 78de225efe91..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_inner_error_py3.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
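Note (review): JobInnerError is self-referential through inner_error, so nested failures form a chain. A small helper for walking it, built only on the fields shown in the docstring above:

    def walk_inner_errors(err):
        """Yield every JobInnerError in the nested inner_error chain."""
        while err is not None:
            yield err
            err = err.inner_error

    # Combined with the error details from the earlier sketch:
    # for detail in retrieved.error_message or []:
    #     for inner in walk_inner_errors(detail.inner_error):
    #         print(inner.component, inner.diagnostic_code, inner.message)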
-# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobInnerError(Model): - """The Data Lake Analytics job error details. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar error_id: The specific identifier for the type of error encountered - in the job. - :vartype error_id: str - :ivar severity: The severity level of the failure. Possible values - include: 'Warning', 'Error', 'Info', 'SevereWarning', 'Deprecated', - 'UserWarning' - :vartype severity: str or - ~azure.mgmt.datalake.analytics.job.models.SeverityTypes - :ivar source: The ultimate source of the failure (usually either SYSTEM or - USER). - :vartype source: str - :ivar message: The user friendly error message for the failure. - :vartype message: str - :ivar description: The error message description. - :vartype description: str - :ivar details: The details of the error message. - :vartype details: str - :ivar diagnostic_code: The diagnostic error code. - :vartype diagnostic_code: int - :ivar component: The component that failed. - :vartype component: str - :ivar resolution: The recommended resolution for the failure, if any. - :vartype resolution: str - :ivar help_link: The link to MSDN or Azure help for this type of error, if - any. - :vartype help_link: str - :ivar internal_diagnostics: The internal diagnostic stack trace if the - user requesting the job error details has sufficient permissions it will - be retrieved, otherwise it will be empty. - :vartype internal_diagnostics: str - :ivar inner_error: The inner error of this specific job error message, if - any. 
- :vartype inner_error: - ~azure.mgmt.datalake.analytics.job.models.JobInnerError - """ - - _validation = { - 'error_id': {'readonly': True}, - 'severity': {'readonly': True}, - 'source': {'readonly': True}, - 'message': {'readonly': True}, - 'description': {'readonly': True}, - 'details': {'readonly': True}, - 'diagnostic_code': {'readonly': True}, - 'component': {'readonly': True}, - 'resolution': {'readonly': True}, - 'help_link': {'readonly': True}, - 'internal_diagnostics': {'readonly': True}, - 'inner_error': {'readonly': True}, - } - - _attribute_map = { - 'error_id': {'key': 'errorId', 'type': 'str'}, - 'severity': {'key': 'severity', 'type': 'SeverityTypes'}, - 'source': {'key': 'source', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'diagnostic_code': {'key': 'diagnosticCode', 'type': 'int'}, - 'component': {'key': 'component', 'type': 'str'}, - 'resolution': {'key': 'resolution', 'type': 'str'}, - 'help_link': {'key': 'helpLink', 'type': 'str'}, - 'internal_diagnostics': {'key': 'internalDiagnostics', 'type': 'str'}, - 'inner_error': {'key': 'innerError', 'type': 'JobInnerError'}, - } - - def __init__(self, **kwargs) -> None: - super(JobInnerError, self).__init__(**kwargs) - self.error_id = None - self.severity = None - self.source = None - self.message = None - self.description = None - self.details = None - self.diagnostic_code = None - self.component = None - self.resolution = None - self.help_link = None - self.internal_diagnostics = None - self.inner_error = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py deleted file mode 100644 index 763a8ac89801..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information.py +++ /dev/null @@ -1,103 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobPipelineInformation(Model): - """Job Pipeline Information, showing the relationship of jobs and recurrences - of those jobs in a pipeline. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar pipeline_id: The job relationship pipeline identifier (a GUID). - :vartype pipeline_id: str - :ivar pipeline_name: The friendly name of the job relationship pipeline, - which does not need to be unique. - :vartype pipeline_name: str - :ivar pipeline_uri: The pipeline uri, unique, links to the originating - service for this pipeline. - :vartype pipeline_uri: str - :ivar num_jobs_failed: The number of jobs in this pipeline that have - failed. - :vartype num_jobs_failed: int - :ivar num_jobs_canceled: The number of jobs in this pipeline that have - been canceled. - :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: The number of jobs in this pipeline that have - succeeded. 
- :vartype num_jobs_succeeded: int - :ivar au_hours_failed: The number of job execution hours that resulted in - failed jobs. - :vartype au_hours_failed: float - :ivar au_hours_canceled: The number of job execution hours that resulted - in canceled jobs. - :vartype au_hours_canceled: float - :ivar au_hours_succeeded: The number of job execution hours that resulted - in successful jobs. - :vartype au_hours_succeeded: float - :ivar last_submit_time: The last time a job in this pipeline was - submitted. - :vartype last_submit_time: datetime - :ivar runs: The list of recurrence identifiers representing each run of - this pipeline. - :vartype runs: - list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation] - :ivar recurrences: The list of recurrence identifiers representing each - run of this pipeline. - :vartype recurrences: list[str] - """ - - _validation = { - 'pipeline_id': {'readonly': True}, - 'pipeline_name': {'readonly': True, 'max_length': 260}, - 'pipeline_uri': {'readonly': True}, - 'num_jobs_failed': {'readonly': True}, - 'num_jobs_canceled': {'readonly': True}, - 'num_jobs_succeeded': {'readonly': True}, - 'au_hours_failed': {'readonly': True}, - 'au_hours_canceled': {'readonly': True}, - 'au_hours_succeeded': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - 'runs': {'readonly': True}, - 'recurrences': {'readonly': True}, - } - - _attribute_map = { - 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, - 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, - 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, - 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, - 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, - 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, - 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, - 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, - 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - 'runs': {'key': 'runs', 'type': '[JobPipelineRunInformation]'}, - 'recurrences': {'key': 'recurrences', 'type': '[str]'}, - } - - def __init__(self, **kwargs): - super(JobPipelineInformation, self).__init__(**kwargs) - self.pipeline_id = None - self.pipeline_name = None - self.pipeline_uri = None - self.num_jobs_failed = None - self.num_jobs_canceled = None - self.num_jobs_succeeded = None - self.au_hours_failed = None - self.au_hours_canceled = None - self.au_hours_succeeded = None - self.last_submit_time = None - self.runs = None - self.recurrences = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_paged.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_paged.py deleted file mode 100644 index c33529c88e6b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_paged.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
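
The JobPipelineInformation entries above normally come back from a paged listing call, and the paged wrapper that follows fetches continuation pages lazily through nextLink, so a plain for-loop sees every pipeline. A rough sketch, assuming the job management client exposes a ``pipeline`` operations group (``job_client`` and ``account`` are placeholder names):

.. code:: python

    # Rough sketch: iterating the paged result follows nextLink on demand,
    # so no manual page handling is needed. Client and account are assumed.
    for pipeline in job_client.pipeline.list(account):
        finished = ((pipeline.num_jobs_succeeded or 0)
                    + (pipeline.num_jobs_failed or 0)
                    + (pipeline.num_jobs_canceled or 0))
        print(pipeline.pipeline_name, finished, pipeline.au_hours_succeeded)
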
-# -------------------------------------------------------------------------- - -from msrest.paging import Paged - - -class JobPipelineInformationPaged(Paged): - """ - A paging container for iterating over a list of :class:`JobPipelineInformation ` object - """ - - _attribute_map = { - 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[JobPipelineInformation]'} - } - - def __init__(self, *args, **kwargs): - - super(JobPipelineInformationPaged, self).__init__(*args, **kwargs) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py deleted file mode 100644 index 3f088103e097..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_information_py3.py +++ /dev/null @@ -1,103 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobPipelineInformation(Model): - """Job Pipeline Information, showing the relationship of jobs and recurrences - of those jobs in a pipeline. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar pipeline_id: The job relationship pipeline identifier (a GUID). - :vartype pipeline_id: str - :ivar pipeline_name: The friendly name of the job relationship pipeline, - which does not need to be unique. - :vartype pipeline_name: str - :ivar pipeline_uri: The pipeline uri, unique, links to the originating - service for this pipeline. - :vartype pipeline_uri: str - :ivar num_jobs_failed: The number of jobs in this pipeline that have - failed. - :vartype num_jobs_failed: int - :ivar num_jobs_canceled: The number of jobs in this pipeline that have - been canceled. - :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: The number of jobs in this pipeline that have - succeeded. - :vartype num_jobs_succeeded: int - :ivar au_hours_failed: The number of job execution hours that resulted in - failed jobs. - :vartype au_hours_failed: float - :ivar au_hours_canceled: The number of job execution hours that resulted - in canceled jobs. - :vartype au_hours_canceled: float - :ivar au_hours_succeeded: The number of job execution hours that resulted - in successful jobs. - :vartype au_hours_succeeded: float - :ivar last_submit_time: The last time a job in this pipeline was - submitted. - :vartype last_submit_time: datetime - :ivar runs: The list of recurrence identifiers representing each run of - this pipeline. - :vartype runs: - list[~azure.mgmt.datalake.analytics.job.models.JobPipelineRunInformation] - :ivar recurrences: The list of recurrence identifiers representing each - run of this pipeline. 
- :vartype recurrences: list[str] - """ - - _validation = { - 'pipeline_id': {'readonly': True}, - 'pipeline_name': {'readonly': True, 'max_length': 260}, - 'pipeline_uri': {'readonly': True}, - 'num_jobs_failed': {'readonly': True}, - 'num_jobs_canceled': {'readonly': True}, - 'num_jobs_succeeded': {'readonly': True}, - 'au_hours_failed': {'readonly': True}, - 'au_hours_canceled': {'readonly': True}, - 'au_hours_succeeded': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - 'runs': {'readonly': True}, - 'recurrences': {'readonly': True}, - } - - _attribute_map = { - 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, - 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, - 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, - 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, - 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, - 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, - 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, - 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, - 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - 'runs': {'key': 'runs', 'type': '[JobPipelineRunInformation]'}, - 'recurrences': {'key': 'recurrences', 'type': '[str]'}, - } - - def __init__(self, **kwargs) -> None: - super(JobPipelineInformation, self).__init__(**kwargs) - self.pipeline_id = None - self.pipeline_name = None - self.pipeline_uri = None - self.num_jobs_failed = None - self.num_jobs_canceled = None - self.num_jobs_succeeded = None - self.au_hours_failed = None - self.au_hours_canceled = None - self.au_hours_succeeded = None - self.last_submit_time = None - self.runs = None - self.recurrences = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py deleted file mode 100644 index 27e7818219e7..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobPipelineRunInformation(Model): - """Run info for a specific job pipeline. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar run_id: The run identifier of an instance of pipeline executions (a - GUID). - :vartype run_id: str - :ivar last_submit_time: The time this instance was last submitted. 
- :vartype last_submit_time: datetime - """ - - _validation = { - 'run_id': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - } - - _attribute_map = { - 'run_id': {'key': 'runId', 'type': 'str'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(JobPipelineRunInformation, self).__init__(**kwargs) - self.run_id = None - self.last_submit_time = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py deleted file mode 100644 index cefd0d4ab04c..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_pipeline_run_information_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobPipelineRunInformation(Model): - """Run info for a specific job pipeline. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar run_id: The run identifier of an instance of pipeline executions (a - GUID). - :vartype run_id: str - :ivar last_submit_time: The time this instance was last submitted. - :vartype last_submit_time: datetime - """ - - _validation = { - 'run_id': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - } - - _attribute_map = { - 'run_id': {'key': 'runId', 'type': 'str'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs) -> None: - super(JobPipelineRunInformation, self).__init__(**kwargs) - self.run_id = None - self.last_submit_time = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py deleted file mode 100644 index 67330098258f..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobProperties(Model): - """The common Data Lake Analytics job properties. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties - - All required parameters must be populated in order to send to Azure. 
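
The sub-class dispatch described here rides on the ``type`` discriminator: constructing a concrete subclass stamps the matching constant ('USql', 'Hive' or 'Scope'), and deserialization uses the subtype map below to rebuild the right model from a response. An illustrative sketch, assuming the subclass keeps the base signature (the U-SQL script body is a placeholder):

.. code:: python

    # Illustrative: callers build a concrete subclass; `type` is a constant
    # managed by the generated code, never set by hand. The script body is
    # a placeholder and the subclass signature is assumed from the base.
    from azure.mgmt.datalake.analytics.job.models import USqlJobProperties

    props = USqlJobProperties(
        script='@rows = EXTRACT name string FROM "/in.csv" USING Extractors.Csv();'
               ' OUTPUT @rows TO "/out.csv" USING Outputters.Csv();',
    )
    # Reading a response goes the other way: a payload whose type is "USql"
    # deserializes as USqlJobProperties, not as the JobProperties base.
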
- - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} - } - - def __init__(self, **kwargs): - super(JobProperties, self).__init__(**kwargs) - self.runtime_version = kwargs.get('runtime_version', None) - self.script = kwargs.get('script', None) - self.type = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py deleted file mode 100644 index 7470e6271f5b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_properties_py3.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobProperties(Model): - """The common Data Lake Analytics job properties. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: USqlJobProperties, HiveJobProperties, ScopeJobProperties - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. 
- :type type: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'USql': 'USqlJobProperties', 'Hive': 'HiveJobProperties', 'Scope': 'ScopeJobProperties'} - } - - def __init__(self, *, script: str, runtime_version: str=None, **kwargs) -> None: - super(JobProperties, self).__init__(**kwargs) - self.runtime_version = runtime_version - self.script = script - self.type = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py deleted file mode 100644 index 22603aaffdad..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobRecurrenceInformation(Model): - """Recurrence job information for a specific recurrence. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar recurrence_id: The recurrence identifier (a GUID), unique per - activity/script, regardless of iterations. This is something to link - different occurrences of the same job together. - :vartype recurrence_id: str - :ivar recurrence_name: The recurrence name, user friendly name for the - correlation between jobs. - :vartype recurrence_name: str - :ivar num_jobs_failed: The number of jobs in this recurrence that have - failed. - :vartype num_jobs_failed: int - :ivar num_jobs_canceled: The number of jobs in this recurrence that have - been canceled. - :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: The number of jobs in this recurrence that have - succeeded. - :vartype num_jobs_succeeded: int - :ivar au_hours_failed: The number of job execution hours that resulted in - failed jobs. - :vartype au_hours_failed: float - :ivar au_hours_canceled: The number of job execution hours that resulted - in canceled jobs. - :vartype au_hours_canceled: float - :ivar au_hours_succeeded: The number of job execution hours that resulted - in successful jobs. - :vartype au_hours_succeeded: float - :ivar last_submit_time: The last time a job in this recurrence was - submitted. 
- :vartype last_submit_time: datetime - """ - - _validation = { - 'recurrence_id': {'readonly': True}, - 'recurrence_name': {'readonly': True}, - 'num_jobs_failed': {'readonly': True}, - 'num_jobs_canceled': {'readonly': True}, - 'num_jobs_succeeded': {'readonly': True}, - 'au_hours_failed': {'readonly': True}, - 'au_hours_canceled': {'readonly': True}, - 'au_hours_succeeded': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - } - - _attribute_map = { - 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, - 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, - 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, - 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, - 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, - 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, - 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, - 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs): - super(JobRecurrenceInformation, self).__init__(**kwargs) - self.recurrence_id = None - self.recurrence_name = None - self.num_jobs_failed = None - self.num_jobs_canceled = None - self.num_jobs_succeeded = None - self.au_hours_failed = None - self.au_hours_canceled = None - self.au_hours_succeeded = None - self.last_submit_time = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py deleted file mode 100644 index 2d601ac83018..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_recurrence_information_py3.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobRecurrenceInformation(Model): - """Recurrence job information for a specific recurrence. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar recurrence_id: The recurrence identifier (a GUID), unique per - activity/script, regardless of iterations. This is something to link - different occurrences of the same job together. - :vartype recurrence_id: str - :ivar recurrence_name: The recurrence name, user friendly name for the - correlation between jobs. - :vartype recurrence_name: str - :ivar num_jobs_failed: The number of jobs in this recurrence that have - failed. - :vartype num_jobs_failed: int - :ivar num_jobs_canceled: The number of jobs in this recurrence that have - been canceled. - :vartype num_jobs_canceled: int - :ivar num_jobs_succeeded: The number of jobs in this recurrence that have - succeeded. - :vartype num_jobs_succeeded: int - :ivar au_hours_failed: The number of job execution hours that resulted in - failed jobs. 
- :vartype au_hours_failed: float - :ivar au_hours_canceled: The number of job execution hours that resulted - in canceled jobs. - :vartype au_hours_canceled: float - :ivar au_hours_succeeded: The number of job execution hours that resulted - in successful jobs. - :vartype au_hours_succeeded: float - :ivar last_submit_time: The last time a job in this recurrence was - submitted. - :vartype last_submit_time: datetime - """ - - _validation = { - 'recurrence_id': {'readonly': True}, - 'recurrence_name': {'readonly': True}, - 'num_jobs_failed': {'readonly': True}, - 'num_jobs_canceled': {'readonly': True}, - 'num_jobs_succeeded': {'readonly': True}, - 'au_hours_failed': {'readonly': True}, - 'au_hours_canceled': {'readonly': True}, - 'au_hours_succeeded': {'readonly': True}, - 'last_submit_time': {'readonly': True}, - } - - _attribute_map = { - 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, - 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, - 'num_jobs_failed': {'key': 'numJobsFailed', 'type': 'int'}, - 'num_jobs_canceled': {'key': 'numJobsCanceled', 'type': 'int'}, - 'num_jobs_succeeded': {'key': 'numJobsSucceeded', 'type': 'int'}, - 'au_hours_failed': {'key': 'auHoursFailed', 'type': 'float'}, - 'au_hours_canceled': {'key': 'auHoursCanceled', 'type': 'float'}, - 'au_hours_succeeded': {'key': 'auHoursSucceeded', 'type': 'float'}, - 'last_submit_time': {'key': 'lastSubmitTime', 'type': 'iso-8601'}, - } - - def __init__(self, **kwargs) -> None: - super(JobRecurrenceInformation, self).__init__(**kwargs) - self.recurrence_id = None - self.recurrence_name = None - self.num_jobs_failed = None - self.num_jobs_canceled = None - self.num_jobs_succeeded = None - self.au_hours_failed = None - self.au_hours_canceled = None - self.au_hours_succeeded = None - self.last_submit_time = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py deleted file mode 100644 index f407db458a81..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobRelationshipProperties(Model): - """Job relationship information properties including pipeline information, - correlation information, etc. - - All required parameters must be populated in order to send to Azure. - - :param pipeline_id: The job relationship pipeline identifier (a GUID). - :type pipeline_id: str - :param pipeline_name: The friendly name of the job relationship pipeline, - which does not need to be unique. - :type pipeline_name: str - :param pipeline_uri: The pipeline uri, unique, links to the originating - service for this pipeline. - :type pipeline_uri: str - :param run_id: The run identifier (a GUID), unique identifier of the - iteration of this pipeline. - :type run_id: str - :param recurrence_id: Required. 
The recurrence identifier (a GUID), unique - per activity/script, regardless of iterations. This is something to link - different occurrences of the same job together. - :type recurrence_id: str - :param recurrence_name: The recurrence name, user friendly name for the - correlation between jobs. - :type recurrence_name: str - """ - - _validation = { - 'pipeline_name': {'max_length': 260}, - 'recurrence_id': {'required': True}, - 'recurrence_name': {'max_length': 260}, - } - - _attribute_map = { - 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, - 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, - 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, - 'run_id': {'key': 'runId', 'type': 'str'}, - 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, - 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobRelationshipProperties, self).__init__(**kwargs) - self.pipeline_id = kwargs.get('pipeline_id', None) - self.pipeline_name = kwargs.get('pipeline_name', None) - self.pipeline_uri = kwargs.get('pipeline_uri', None) - self.run_id = kwargs.get('run_id', None) - self.recurrence_id = kwargs.get('recurrence_id', None) - self.recurrence_name = kwargs.get('recurrence_name', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py deleted file mode 100644 index 876bdc690718..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_relationship_properties_py3.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobRelationshipProperties(Model): - """Job relationship information properties including pipeline information, - correlation information, etc. - - All required parameters must be populated in order to send to Azure. - - :param pipeline_id: The job relationship pipeline identifier (a GUID). - :type pipeline_id: str - :param pipeline_name: The friendly name of the job relationship pipeline, - which does not need to be unique. - :type pipeline_name: str - :param pipeline_uri: The pipeline uri, unique, links to the originating - service for this pipeline. - :type pipeline_uri: str - :param run_id: The run identifier (a GUID), unique identifier of the - iteration of this pipeline. - :type run_id: str - :param recurrence_id: Required. The recurrence identifier (a GUID), unique - per activity/script, regardless of iterations. This is something to link - different occurrences of the same job together. - :type recurrence_id: str - :param recurrence_name: The recurrence name, user friendly name for the - correlation between jobs. 
- :type recurrence_name: str - """ - - _validation = { - 'pipeline_name': {'max_length': 260}, - 'recurrence_id': {'required': True}, - 'recurrence_name': {'max_length': 260}, - } - - _attribute_map = { - 'pipeline_id': {'key': 'pipelineId', 'type': 'str'}, - 'pipeline_name': {'key': 'pipelineName', 'type': 'str'}, - 'pipeline_uri': {'key': 'pipelineUri', 'type': 'str'}, - 'run_id': {'key': 'runId', 'type': 'str'}, - 'recurrence_id': {'key': 'recurrenceId', 'type': 'str'}, - 'recurrence_name': {'key': 'recurrenceName', 'type': 'str'}, - } - - def __init__(self, *, recurrence_id: str, pipeline_id: str=None, pipeline_name: str=None, pipeline_uri: str=None, run_id: str=None, recurrence_name: str=None, **kwargs) -> None: - super(JobRelationshipProperties, self).__init__(**kwargs) - self.pipeline_id = pipeline_id - self.pipeline_name = pipeline_name - self.pipeline_uri = pipeline_uri - self.run_id = run_id - self.recurrence_id = recurrence_id - self.recurrence_name = recurrence_name diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py deleted file mode 100644 index aa33c333446a..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobResource(Model): - """The Data Lake Analytics job resources. - - :param name: The name of the resource. - :type name: str - :param resource_path: The path to the resource. - :type resource_path: str - :param type: The job resource type. Possible values include: - 'VertexResource', 'JobManagerResource', 'StatisticsResource', - 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', - 'StatisticsResourceInUserFolder' - :type type: str or - ~azure.mgmt.datalake.analytics.job.models.JobResourceType - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'resource_path': {'key': 'resourcePath', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobResourceType'}, - } - - def __init__(self, **kwargs): - super(JobResource, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.resource_path = kwargs.get('resource_path', None) - self.type = kwargs.get('type', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py deleted file mode 100644 index d0d2b7c2cd28..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_resource_py3.py +++ /dev/null @@ -1,40 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
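
Of the relationship fields above, only recurrence_id is required; the pipeline fields are optional correlation metadata one level up. A sketch of attaching the model to a submission, with made-up placeholder GUIDs and an assumed surrounding JobInformation call:

.. code:: python

    # Sketch with placeholder GUIDs: recurrence_id ties repeat runs of one
    # script together; the pipeline_* fields group related jobs above that.
    import uuid
    from azure.mgmt.datalake.analytics.job.models import JobRelationshipProperties

    related = JobRelationshipProperties(
        recurrence_id=str(uuid.uuid4()),
        pipeline_id=str(uuid.uuid4()),
        pipeline_name='nightly-aggregation',  # max_length 260 is enforced
        run_id=str(uuid.uuid4()),
    )
    # Then passed through as JobInformation(..., related=related) at submit time.
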
-# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobResource(Model): - """The Data Lake Analytics job resources. - - :param name: The name of the resource. - :type name: str - :param resource_path: The path to the resource. - :type resource_path: str - :param type: The job resource type. Possible values include: - 'VertexResource', 'JobManagerResource', 'StatisticsResource', - 'VertexResourceInUserFolder', 'JobManagerResourceInUserFolder', - 'StatisticsResourceInUserFolder' - :type type: str or - ~azure.mgmt.datalake.analytics.job.models.JobResourceType - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'resource_path': {'key': 'resourcePath', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'JobResourceType'}, - } - - def __init__(self, *, name: str=None, resource_path: str=None, type=None, **kwargs) -> None: - super(JobResource, self).__init__(**kwargs) - self.name = name - self.resource_path = resource_path - self.type = type diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py deleted file mode 100644 index ec299db698bc..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStateAuditRecord(Model): - """The Data Lake Analytics job state audit records for tracking the lifecycle - of a job. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar new_state: The new state the job is in. - :vartype new_state: str - :ivar time_stamp: The time stamp that the state change took place. - :vartype time_stamp: datetime - :ivar requested_by_user: The user who requests the change. - :vartype requested_by_user: str - :ivar details: The details of the audit log. 
- :vartype details: str - """ - - _validation = { - 'new_state': {'readonly': True}, - 'time_stamp': {'readonly': True}, - 'requested_by_user': {'readonly': True}, - 'details': {'readonly': True}, - } - - _attribute_map = { - 'new_state': {'key': 'newState', 'type': 'str'}, - 'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}, - 'requested_by_user': {'key': 'requestedByUser', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(JobStateAuditRecord, self).__init__(**kwargs) - self.new_state = None - self.time_stamp = None - self.requested_by_user = None - self.details = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py deleted file mode 100644 index 1465c71edadb..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_state_audit_record_py3.py +++ /dev/null @@ -1,51 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStateAuditRecord(Model): - """The Data Lake Analytics job state audit records for tracking the lifecycle - of a job. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar new_state: The new state the job is in. - :vartype new_state: str - :ivar time_stamp: The time stamp that the state change took place. - :vartype time_stamp: datetime - :ivar requested_by_user: The user who requests the change. - :vartype requested_by_user: str - :ivar details: The details of the audit log. - :vartype details: str - """ - - _validation = { - 'new_state': {'readonly': True}, - 'time_stamp': {'readonly': True}, - 'requested_by_user': {'readonly': True}, - 'details': {'readonly': True}, - } - - _attribute_map = { - 'new_state': {'key': 'newState', 'type': 'str'}, - 'time_stamp': {'key': 'timeStamp', 'type': 'iso-8601'}, - 'requested_by_user': {'key': 'requestedByUser', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - } - - def __init__(self, **kwargs) -> None: - super(JobStateAuditRecord, self).__init__(**kwargs) - self.new_state = None - self.time_stamp = None - self.requested_by_user = None - self.details = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py deleted file mode 100644 index d9b24bbce258..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. 
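
Every field on the audit record above is read-only, so a job's lifecycle trail is only ever read off a retrieved job. A small sketch, again with the hypothetical ``job_client``, ``account`` and ``job_id``:

.. code:: python

    # Sketch: print the lifecycle of a job from its server-populated audit
    # records; `job_client`, `account` and `job_id` are assumed names.
    job = job_client.job.get(account, job_id)
    for record in job.state_audit_records or []:
        print(record.time_stamp, record.new_state, record.requested_by_user)
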
-# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatistics(Model): - """The Data Lake Analytics job execution statistics. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar last_update_time_utc: The last update time for the statistics. - :vartype last_update_time_utc: datetime - :ivar finalizing_time_utc: The job finalizing start time. - :vartype finalizing_time_utc: datetime - :ivar stages: The list of stages for the job. - :vartype stages: - list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] - """ - - _validation = { - 'last_update_time_utc': {'readonly': True}, - 'finalizing_time_utc': {'readonly': True}, - 'stages': {'readonly': True}, - } - - _attribute_map = { - 'last_update_time_utc': {'key': 'lastUpdateTimeUtc', 'type': 'iso-8601'}, - 'finalizing_time_utc': {'key': 'finalizingTimeUtc', 'type': 'iso-8601'}, - 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, - } - - def __init__(self, **kwargs): - super(JobStatistics, self).__init__(**kwargs) - self.last_update_time_utc = None - self.finalizing_time_utc = None - self.stages = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py deleted file mode 100644 index 2c3c187b2793..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_py3.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatistics(Model): - """The Data Lake Analytics job execution statistics. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar last_update_time_utc: The last update time for the statistics. - :vartype last_update_time_utc: datetime - :ivar finalizing_time_utc: The job finalizing start time. - :vartype finalizing_time_utc: datetime - :ivar stages: The list of stages for the job. 
- :vartype stages: - list[~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertexStage] - """ - - _validation = { - 'last_update_time_utc': {'readonly': True}, - 'finalizing_time_utc': {'readonly': True}, - 'stages': {'readonly': True}, - } - - _attribute_map = { - 'last_update_time_utc': {'key': 'lastUpdateTimeUtc', 'type': 'iso-8601'}, - 'finalizing_time_utc': {'key': 'finalizingTimeUtc', 'type': 'iso-8601'}, - 'stages': {'key': 'stages', 'type': '[JobStatisticsVertexStage]'}, - } - - def __init__(self, **kwargs) -> None: - super(JobStatistics, self).__init__(**kwargs) - self.last_update_time_utc = None - self.finalizing_time_utc = None - self.stages = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py deleted file mode 100644 index d3fba8e5e282..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatisticsVertex(Model): - """The detailed information for a vertex. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar name: The name of the vertex. - :vartype name: str - :ivar vertex_id: The id of the vertex. - :vartype vertex_id: str - :ivar execution_time: The amount of execution time of the vertex. - :vartype execution_time: timedelta - :ivar data_read: The amount of data read of the vertex, in bytes. - :vartype data_read: long - :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in - bytes. 
- :vartype peak_mem_usage: long - """ - - _validation = { - 'name': {'readonly': True}, - 'vertex_id': {'readonly': True}, - 'execution_time': {'readonly': True}, - 'data_read': {'readonly': True}, - 'peak_mem_usage': {'readonly': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'vertex_id': {'key': 'vertexId', 'type': 'str'}, - 'execution_time': {'key': 'executionTime', 'type': 'duration'}, - 'data_read': {'key': 'dataRead', 'type': 'long'}, - 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, - } - - def __init__(self, **kwargs): - super(JobStatisticsVertex, self).__init__(**kwargs) - self.name = None - self.vertex_id = None - self.execution_time = None - self.data_read = None - self.peak_mem_usage = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py deleted file mode 100644 index 85b20fc5576c..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_py3.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatisticsVertex(Model): - """The detailed information for a vertex. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar name: The name of the vertex. - :vartype name: str - :ivar vertex_id: The id of the vertex. - :vartype vertex_id: str - :ivar execution_time: The amount of execution time of the vertex. - :vartype execution_time: timedelta - :ivar data_read: The amount of data read of the vertex, in bytes. - :vartype data_read: long - :ivar peak_mem_usage: The amount of peak memory usage of the vertex, in - bytes. 
- :vartype peak_mem_usage: long - """ - - _validation = { - 'name': {'readonly': True}, - 'vertex_id': {'readonly': True}, - 'execution_time': {'readonly': True}, - 'data_read': {'readonly': True}, - 'peak_mem_usage': {'readonly': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'vertex_id': {'key': 'vertexId', 'type': 'str'}, - 'execution_time': {'key': 'executionTime', 'type': 'duration'}, - 'data_read': {'key': 'dataRead', 'type': 'long'}, - 'peak_mem_usage': {'key': 'peakMemUsage', 'type': 'long'}, - } - - def __init__(self, **kwargs) -> None: - super(JobStatisticsVertex, self).__init__(**kwargs) - self.name = None - self.vertex_id = None - self.execution_time = None - self.data_read = None - self.peak_mem_usage = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py deleted file mode 100644 index c5567667ea3e..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatisticsVertexStage(Model): - """The Data Lake Analytics job statistics vertex stage information. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar data_read: The amount of data read, in bytes. - :vartype data_read: long - :ivar data_read_cross_pod: The amount of data read across multiple pods, - in bytes. - :vartype data_read_cross_pod: long - :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. - :vartype data_read_intra_pod: long - :ivar data_to_read: The amount of data remaining to be read, in bytes. - :vartype data_to_read: long - :ivar data_written: The amount of data written, in bytes. - :vartype data_written: long - :ivar duplicate_discard_count: The number of duplicates that were - discarded. - :vartype duplicate_discard_count: int - :ivar failed_count: The number of failures that occured in this stage. - :vartype failed_count: int - :ivar max_vertex_data_read: The maximum amount of data read in a single - vertex, in bytes. - :vartype max_vertex_data_read: long - :ivar min_vertex_data_read: The minimum amount of data read in a single - vertex, in bytes. - :vartype min_vertex_data_read: long - :ivar read_failure_count: The number of read failures in this stage. - :vartype read_failure_count: int - :ivar revocation_count: The number of vertices that were revoked during - this stage. - :vartype revocation_count: int - :ivar running_count: The number of currently running vertices in this - stage. - :vartype running_count: int - :ivar scheduled_count: The number of currently scheduled vertices in this - stage. - :vartype scheduled_count: int - :ivar stage_name: The name of this stage in job execution. 
- :vartype stage_name: str - :ivar succeeded_count: The number of vertices that succeeded in this - stage. - :vartype succeeded_count: int - :ivar temp_data_written: The amount of temporary data written, in bytes. - :vartype temp_data_written: long - :ivar total_count: The total vertex count for this stage. - :vartype total_count: int - :ivar total_failed_time: The amount of time that failed vertices took up - in this stage. - :vartype total_failed_time: timedelta - :ivar total_progress: The current progress of this stage, as a percentage. - :vartype total_progress: int - :ivar total_succeeded_time: The amount of time all successful vertices - took in this stage. - :vartype total_succeeded_time: timedelta - :ivar total_peak_mem_usage: The sum of the peak memory usage of all the - vertices in the stage, in bytes. - :vartype total_peak_mem_usage: long - :ivar total_execution_time: The sum of the total execution time of all the - vertices in the stage. - :vartype total_execution_time: timedelta - :param max_data_read_vertex: the vertex with the maximum amount of data - read. - :type max_data_read_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :param max_execution_time_vertex: the vertex with the maximum execution - time. - :type max_execution_time_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :param max_peak_mem_usage_vertex: the vertex with the maximum peak memory - usage. - :type max_peak_mem_usage_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core - count. - :vartype estimated_vertex_cpu_core_count: int - :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU - core count. - :vartype estimated_vertex_peak_cpu_core_count: int - :ivar estimated_vertex_mem_size: The estimated vertex memory size, in - bytes. - :vartype estimated_vertex_mem_size: long - :param allocated_container_cpu_core_count: The statistics information for - the allocated container CPU core count. - :type allocated_container_cpu_core_count: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param allocated_container_mem_size: The statistics information for the - allocated container memory size. - :type allocated_container_mem_size: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_cpu_core_count: The statistics information for the used - vertex CPU core count. - :type used_vertex_cpu_core_count: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_peak_mem_size: The statistics information for the used - vertex peak memory size. 
- :type used_vertex_peak_mem_size: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - """ - - _validation = { - 'data_read': {'readonly': True}, - 'data_read_cross_pod': {'readonly': True}, - 'data_read_intra_pod': {'readonly': True}, - 'data_to_read': {'readonly': True}, - 'data_written': {'readonly': True}, - 'duplicate_discard_count': {'readonly': True}, - 'failed_count': {'readonly': True}, - 'max_vertex_data_read': {'readonly': True}, - 'min_vertex_data_read': {'readonly': True}, - 'read_failure_count': {'readonly': True}, - 'revocation_count': {'readonly': True}, - 'running_count': {'readonly': True}, - 'scheduled_count': {'readonly': True}, - 'stage_name': {'readonly': True}, - 'succeeded_count': {'readonly': True}, - 'temp_data_written': {'readonly': True}, - 'total_count': {'readonly': True}, - 'total_failed_time': {'readonly': True}, - 'total_progress': {'readonly': True}, - 'total_succeeded_time': {'readonly': True}, - 'total_peak_mem_usage': {'readonly': True}, - 'total_execution_time': {'readonly': True}, - 'estimated_vertex_cpu_core_count': {'readonly': True}, - 'estimated_vertex_peak_cpu_core_count': {'readonly': True}, - 'estimated_vertex_mem_size': {'readonly': True}, - } - - _attribute_map = { - 'data_read': {'key': 'dataRead', 'type': 'long'}, - 'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'}, - 'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'}, - 'data_to_read': {'key': 'dataToRead', 'type': 'long'}, - 'data_written': {'key': 'dataWritten', 'type': 'long'}, - 'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'}, - 'failed_count': {'key': 'failedCount', 'type': 'int'}, - 'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'}, - 'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'}, - 'read_failure_count': {'key': 'readFailureCount', 'type': 'int'}, - 'revocation_count': {'key': 'revocationCount', 'type': 'int'}, - 'running_count': {'key': 'runningCount', 'type': 'int'}, - 'scheduled_count': {'key': 'scheduledCount', 'type': 'int'}, - 'stage_name': {'key': 'stageName', 'type': 'str'}, - 'succeeded_count': {'key': 'succeededCount', 'type': 'int'}, - 'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'}, - 'total_count': {'key': 'totalCount', 'type': 'int'}, - 'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'}, - 'total_progress': {'key': 'totalProgress', 'type': 'int'}, - 'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'}, - 'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'}, - 'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'}, - 'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 'JobStatisticsVertex'}, - 'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'}, - 'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'}, - 'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'}, - 'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'}, - 'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'}, - 'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'}, - 'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'}, - 'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 
'ResourceUsageStatistics'}, - 'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'}, - } - - def __init__(self, **kwargs): - super(JobStatisticsVertexStage, self).__init__(**kwargs) - self.data_read = None - self.data_read_cross_pod = None - self.data_read_intra_pod = None - self.data_to_read = None - self.data_written = None - self.duplicate_discard_count = None - self.failed_count = None - self.max_vertex_data_read = None - self.min_vertex_data_read = None - self.read_failure_count = None - self.revocation_count = None - self.running_count = None - self.scheduled_count = None - self.stage_name = None - self.succeeded_count = None - self.temp_data_written = None - self.total_count = None - self.total_failed_time = None - self.total_progress = None - self.total_succeeded_time = None - self.total_peak_mem_usage = None - self.total_execution_time = None - self.max_data_read_vertex = kwargs.get('max_data_read_vertex', None) - self.max_execution_time_vertex = kwargs.get('max_execution_time_vertex', None) - self.max_peak_mem_usage_vertex = kwargs.get('max_peak_mem_usage_vertex', None) - self.estimated_vertex_cpu_core_count = None - self.estimated_vertex_peak_cpu_core_count = None - self.estimated_vertex_mem_size = None - self.allocated_container_cpu_core_count = kwargs.get('allocated_container_cpu_core_count', None) - self.allocated_container_mem_size = kwargs.get('allocated_container_mem_size', None) - self.used_vertex_cpu_core_count = kwargs.get('used_vertex_cpu_core_count', None) - self.used_vertex_peak_mem_size = kwargs.get('used_vertex_peak_mem_size', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py deleted file mode 100644 index b8987167dec0..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/job_statistics_vertex_stage_py3.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class JobStatisticsVertexStage(Model): - """The Data Lake Analytics job statistics vertex stage information. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar data_read: The amount of data read, in bytes. - :vartype data_read: long - :ivar data_read_cross_pod: The amount of data read across multiple pods, - in bytes. - :vartype data_read_cross_pod: long - :ivar data_read_intra_pod: The amount of data read in one pod, in bytes. - :vartype data_read_intra_pod: long - :ivar data_to_read: The amount of data remaining to be read, in bytes. - :vartype data_to_read: long - :ivar data_written: The amount of data written, in bytes. - :vartype data_written: long - :ivar duplicate_discard_count: The number of duplicates that were - discarded. - :vartype duplicate_discard_count: int - :ivar failed_count: The number of failures that occured in this stage. 
- :vartype failed_count: int - :ivar max_vertex_data_read: The maximum amount of data read in a single - vertex, in bytes. - :vartype max_vertex_data_read: long - :ivar min_vertex_data_read: The minimum amount of data read in a single - vertex, in bytes. - :vartype min_vertex_data_read: long - :ivar read_failure_count: The number of read failures in this stage. - :vartype read_failure_count: int - :ivar revocation_count: The number of vertices that were revoked during - this stage. - :vartype revocation_count: int - :ivar running_count: The number of currently running vertices in this - stage. - :vartype running_count: int - :ivar scheduled_count: The number of currently scheduled vertices in this - stage. - :vartype scheduled_count: int - :ivar stage_name: The name of this stage in job execution. - :vartype stage_name: str - :ivar succeeded_count: The number of vertices that succeeded in this - stage. - :vartype succeeded_count: int - :ivar temp_data_written: The amount of temporary data written, in bytes. - :vartype temp_data_written: long - :ivar total_count: The total vertex count for this stage. - :vartype total_count: int - :ivar total_failed_time: The amount of time that failed vertices took up - in this stage. - :vartype total_failed_time: timedelta - :ivar total_progress: The current progress of this stage, as a percentage. - :vartype total_progress: int - :ivar total_succeeded_time: The amount of time all successful vertices - took in this stage. - :vartype total_succeeded_time: timedelta - :ivar total_peak_mem_usage: The sum of the peak memory usage of all the - vertices in the stage, in bytes. - :vartype total_peak_mem_usage: long - :ivar total_execution_time: The sum of the total execution time of all the - vertices in the stage. - :vartype total_execution_time: timedelta - :param max_data_read_vertex: the vertex with the maximum amount of data - read. - :type max_data_read_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :param max_execution_time_vertex: the vertex with the maximum execution - time. - :type max_execution_time_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :param max_peak_mem_usage_vertex: the vertex with the maximum peak memory - usage. - :type max_peak_mem_usage_vertex: - ~azure.mgmt.datalake.analytics.job.models.JobStatisticsVertex - :ivar estimated_vertex_cpu_core_count: The estimated vertex CPU core - count. - :vartype estimated_vertex_cpu_core_count: int - :ivar estimated_vertex_peak_cpu_core_count: The estimated vertex peak CPU - core count. - :vartype estimated_vertex_peak_cpu_core_count: int - :ivar estimated_vertex_mem_size: The estimated vertex memory size, in - bytes. - :vartype estimated_vertex_mem_size: long - :param allocated_container_cpu_core_count: The statistics information for - the allocated container CPU core count. - :type allocated_container_cpu_core_count: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param allocated_container_mem_size: The statistics information for the - allocated container memory size. - :type allocated_container_mem_size: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_cpu_core_count: The statistics information for the used - vertex CPU core count. - :type used_vertex_cpu_core_count: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - :param used_vertex_peak_mem_size: The statistics information for the used - vertex peak memory size. 
- :type used_vertex_peak_mem_size: - ~azure.mgmt.datalake.analytics.job.models.ResourceUsageStatistics - """ - - _validation = { - 'data_read': {'readonly': True}, - 'data_read_cross_pod': {'readonly': True}, - 'data_read_intra_pod': {'readonly': True}, - 'data_to_read': {'readonly': True}, - 'data_written': {'readonly': True}, - 'duplicate_discard_count': {'readonly': True}, - 'failed_count': {'readonly': True}, - 'max_vertex_data_read': {'readonly': True}, - 'min_vertex_data_read': {'readonly': True}, - 'read_failure_count': {'readonly': True}, - 'revocation_count': {'readonly': True}, - 'running_count': {'readonly': True}, - 'scheduled_count': {'readonly': True}, - 'stage_name': {'readonly': True}, - 'succeeded_count': {'readonly': True}, - 'temp_data_written': {'readonly': True}, - 'total_count': {'readonly': True}, - 'total_failed_time': {'readonly': True}, - 'total_progress': {'readonly': True}, - 'total_succeeded_time': {'readonly': True}, - 'total_peak_mem_usage': {'readonly': True}, - 'total_execution_time': {'readonly': True}, - 'estimated_vertex_cpu_core_count': {'readonly': True}, - 'estimated_vertex_peak_cpu_core_count': {'readonly': True}, - 'estimated_vertex_mem_size': {'readonly': True}, - } - - _attribute_map = { - 'data_read': {'key': 'dataRead', 'type': 'long'}, - 'data_read_cross_pod': {'key': 'dataReadCrossPod', 'type': 'long'}, - 'data_read_intra_pod': {'key': 'dataReadIntraPod', 'type': 'long'}, - 'data_to_read': {'key': 'dataToRead', 'type': 'long'}, - 'data_written': {'key': 'dataWritten', 'type': 'long'}, - 'duplicate_discard_count': {'key': 'duplicateDiscardCount', 'type': 'int'}, - 'failed_count': {'key': 'failedCount', 'type': 'int'}, - 'max_vertex_data_read': {'key': 'maxVertexDataRead', 'type': 'long'}, - 'min_vertex_data_read': {'key': 'minVertexDataRead', 'type': 'long'}, - 'read_failure_count': {'key': 'readFailureCount', 'type': 'int'}, - 'revocation_count': {'key': 'revocationCount', 'type': 'int'}, - 'running_count': {'key': 'runningCount', 'type': 'int'}, - 'scheduled_count': {'key': 'scheduledCount', 'type': 'int'}, - 'stage_name': {'key': 'stageName', 'type': 'str'}, - 'succeeded_count': {'key': 'succeededCount', 'type': 'int'}, - 'temp_data_written': {'key': 'tempDataWritten', 'type': 'long'}, - 'total_count': {'key': 'totalCount', 'type': 'int'}, - 'total_failed_time': {'key': 'totalFailedTime', 'type': 'duration'}, - 'total_progress': {'key': 'totalProgress', 'type': 'int'}, - 'total_succeeded_time': {'key': 'totalSucceededTime', 'type': 'duration'}, - 'total_peak_mem_usage': {'key': 'totalPeakMemUsage', 'type': 'long'}, - 'total_execution_time': {'key': 'totalExecutionTime', 'type': 'duration'}, - 'max_data_read_vertex': {'key': 'maxDataReadVertex', 'type': 'JobStatisticsVertex'}, - 'max_execution_time_vertex': {'key': 'maxExecutionTimeVertex', 'type': 'JobStatisticsVertex'}, - 'max_peak_mem_usage_vertex': {'key': 'maxPeakMemUsageVertex', 'type': 'JobStatisticsVertex'}, - 'estimated_vertex_cpu_core_count': {'key': 'estimatedVertexCpuCoreCount', 'type': 'int'}, - 'estimated_vertex_peak_cpu_core_count': {'key': 'estimatedVertexPeakCpuCoreCount', 'type': 'int'}, - 'estimated_vertex_mem_size': {'key': 'estimatedVertexMemSize', 'type': 'long'}, - 'allocated_container_cpu_core_count': {'key': 'allocatedContainerCpuCoreCount', 'type': 'ResourceUsageStatistics'}, - 'allocated_container_mem_size': {'key': 'allocatedContainerMemSize', 'type': 'ResourceUsageStatistics'}, - 'used_vertex_cpu_core_count': {'key': 'usedVertexCpuCoreCount', 'type': 
'ResourceUsageStatistics'}, - 'used_vertex_peak_mem_size': {'key': 'usedVertexPeakMemSize', 'type': 'ResourceUsageStatistics'}, - } - - def __init__(self, *, max_data_read_vertex=None, max_execution_time_vertex=None, max_peak_mem_usage_vertex=None, allocated_container_cpu_core_count=None, allocated_container_mem_size=None, used_vertex_cpu_core_count=None, used_vertex_peak_mem_size=None, **kwargs) -> None: - super(JobStatisticsVertexStage, self).__init__(**kwargs) - self.data_read = None - self.data_read_cross_pod = None - self.data_read_intra_pod = None - self.data_to_read = None - self.data_written = None - self.duplicate_discard_count = None - self.failed_count = None - self.max_vertex_data_read = None - self.min_vertex_data_read = None - self.read_failure_count = None - self.revocation_count = None - self.running_count = None - self.scheduled_count = None - self.stage_name = None - self.succeeded_count = None - self.temp_data_written = None - self.total_count = None - self.total_failed_time = None - self.total_progress = None - self.total_succeeded_time = None - self.total_peak_mem_usage = None - self.total_execution_time = None - self.max_data_read_vertex = max_data_read_vertex - self.max_execution_time_vertex = max_execution_time_vertex - self.max_peak_mem_usage_vertex = max_peak_mem_usage_vertex - self.estimated_vertex_cpu_core_count = None - self.estimated_vertex_peak_cpu_core_count = None - self.estimated_vertex_mem_size = None - self.allocated_container_cpu_core_count = allocated_container_cpu_core_count - self.allocated_container_mem_size = allocated_container_mem_size - self.used_vertex_cpu_core_count = used_vertex_cpu_core_count - self.used_vertex_peak_mem_size = used_vertex_peak_mem_size diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py deleted file mode 100644 index 468561904949..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ResourceUsageStatistics(Model): - """The statistics information for resource usage. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar average: The average value. - :vartype average: float - :ivar minimum: The minimum value. - :vartype minimum: long - :ivar maximum: The maximum value. 
- :vartype maximum: long - """ - - _validation = { - 'average': {'readonly': True}, - 'minimum': {'readonly': True}, - 'maximum': {'readonly': True}, - } - - _attribute_map = { - 'average': {'key': 'average', 'type': 'float'}, - 'minimum': {'key': 'minimum', 'type': 'long'}, - 'maximum': {'key': 'maximum', 'type': 'long'}, - } - - def __init__(self, **kwargs): - super(ResourceUsageStatistics, self).__init__(**kwargs) - self.average = None - self.minimum = None - self.maximum = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py deleted file mode 100644 index a7b64a4926d9..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/resource_usage_statistics_py3.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ResourceUsageStatistics(Model): - """The statistics information for resource usage. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar average: The average value. - :vartype average: float - :ivar minimum: The minimum value. - :vartype minimum: long - :ivar maximum: The maximum value. - :vartype maximum: long - """ - - _validation = { - 'average': {'readonly': True}, - 'minimum': {'readonly': True}, - 'maximum': {'readonly': True}, - } - - _attribute_map = { - 'average': {'key': 'average', 'type': 'float'}, - 'minimum': {'key': 'minimum', 'type': 'long'}, - 'maximum': {'key': 'maximum', 'type': 'long'}, - } - - def __init__(self, **kwargs) -> None: - super(ResourceUsageStatistics, self).__init__(**kwargs) - self.average = None - self.minimum = None - self.maximum = None diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py deleted file mode 100644 index 1772c11b91a3..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties import JobProperties - - -class ScopeJobProperties(JobProperties): - """Scope job properties used when submitting and retrieving Scope jobs. (Only - for use internally with Scope job type.). - - Variables are only populated by the server, and will be ignored when - sending a request. 
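The hunks above remove the standalone modules for JobStatisticsVertex, JobStatisticsVertexStage, and ResourceUsageStatistics, presumably in favor of the consolidated model files generated elsewhere in this PR; the classes themselves remain part of the job models package. A minimal read-only sketch of traversing these statistics, with field names taken from the docstrings above. `credentials` (any msrestazure credentials object) and `job_id` (the GUID of a finished job) are placeholders, and the `stats.stages` attribute is assumed from the JobStatistics model:

    from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient

    # Placeholders: `credentials` is any msrestazure credentials object;
    # 'azuredatalakeanalytics.net' is the public ADLA job DNS suffix.
    client = DataLakeAnalyticsJobManagementClient(credentials, 'azuredatalakeanalytics.net')

    stats = client.job.get_statistics('myaccount', job_id)
    for stage in stats.stages:                      # JobStatisticsVertexStage instances
        print(stage.stage_name, stage.succeeded_count, '/', stage.total_count)
        slowest = stage.max_execution_time_vertex   # a JobStatisticsVertex, may be None
        if slowest is not None:
            print('  slowest vertex:', slowest.name, slowest.execution_time)
        cores = stage.used_vertex_cpu_core_count    # a ResourceUsageStatistics, may be None
        if cores is not None:
            print('  vertex CPU cores min/avg/max:',
                  cores.minimum, cores.average, cores.maximum)

All of these fields are read-only (`readonly` in `_validation`), so the models are only ever populated from service responses, never constructed by callers.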
- - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar resources: The list of resources that are required by the job. - :vartype resources: - list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :ivar user_algebra_path: The algebra file path after the job has - completed. - :vartype user_algebra_path: str - :param notifier: The list of email addresses, separated by semi-colons, to - notify when the job reaches a terminal state. - :type notifier: str - :ivar total_compilation_time: The total time this job spent compiling. - This value should not be set by the user and will be ignored if it is. - :vartype total_compilation_time: timedelta - :ivar total_queued_time: The total time this job spent queued. This value - should not be set by the user and will be ignored if it is. - :vartype total_queued_time: timedelta - :ivar total_running_time: The total time this job spent executing. This - value should not be set by the user and will be ignored if it is. - :vartype total_running_time: timedelta - :ivar total_paused_time: The total time this job spent paused. This value - should not be set by the user and will be ignored if it is. - :vartype total_paused_time: timedelta - :ivar root_process_node_id: The ID used to identify the job manager - coordinating job execution. This value should not be set by the user and - will be ignored if it is. - :vartype root_process_node_id: str - :ivar yarn_application_id: The ID used to identify the yarn application - executing the job. This value should not be set by the user and will be - ignored if it is. 
- :vartype yarn_application_id: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'resources': {'readonly': True}, - 'user_algebra_path': {'readonly': True}, - 'total_compilation_time': {'readonly': True}, - 'total_queued_time': {'readonly': True}, - 'total_running_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, - 'root_process_node_id': {'readonly': True}, - 'yarn_application_id': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, - 'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'}, - 'notifier': {'key': 'notifier', 'type': 'str'}, - 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, - 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, - 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, - 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ScopeJobProperties, self).__init__(**kwargs) - self.resources = None - self.user_algebra_path = None - self.notifier = kwargs.get('notifier', None) - self.total_compilation_time = None - self.total_queued_time = None - self.total_running_time = None - self.total_paused_time = None - self.root_process_node_id = None - self.yarn_application_id = None - self.type = 'Scope' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py deleted file mode 100644 index 8a16deab8305..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_properties_py3.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties_py3 import JobProperties - - -class ScopeJobProperties(JobProperties): - """Scope job properties used when submitting and retrieving Scope jobs. (Only - for use internally with Scope job type.). - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar resources: The list of resources that are required by the job. 
- :vartype resources: - list[~azure.mgmt.datalake.analytics.job.models.ScopeJobResource] - :ivar user_algebra_path: The algebra file path after the job has - completed. - :vartype user_algebra_path: str - :param notifier: The list of email addresses, separated by semi-colons, to - notify when the job reaches a terminal state. - :type notifier: str - :ivar total_compilation_time: The total time this job spent compiling. - This value should not be set by the user and will be ignored if it is. - :vartype total_compilation_time: timedelta - :ivar total_queued_time: The total time this job spent queued. This value - should not be set by the user and will be ignored if it is. - :vartype total_queued_time: timedelta - :ivar total_running_time: The total time this job spent executing. This - value should not be set by the user and will be ignored if it is. - :vartype total_running_time: timedelta - :ivar total_paused_time: The total time this job spent paused. This value - should not be set by the user and will be ignored if it is. - :vartype total_paused_time: timedelta - :ivar root_process_node_id: The ID used to identify the job manager - coordinating job execution. This value should not be set by the user and - will be ignored if it is. - :vartype root_process_node_id: str - :ivar yarn_application_id: The ID used to identify the yarn application - executing the job. This value should not be set by the user and will be - ignored if it is. - :vartype yarn_application_id: str - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'resources': {'readonly': True}, - 'user_algebra_path': {'readonly': True}, - 'total_compilation_time': {'readonly': True}, - 'total_queued_time': {'readonly': True}, - 'total_running_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, - 'root_process_node_id': {'readonly': True}, - 'yarn_application_id': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[ScopeJobResource]'}, - 'user_algebra_path': {'key': 'userAlgebraPath', 'type': 'str'}, - 'notifier': {'key': 'notifier', 'type': 'str'}, - 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, - 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, - 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, - 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, - } - - def __init__(self, *, script: str, runtime_version: str=None, notifier: str=None, **kwargs) -> None: - super(ScopeJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) - self.resources = None - self.user_algebra_path = None - self.notifier = notifier - self.total_compilation_time = None - self.total_queued_time = None - self.total_running_time = None - self.total_paused_time = None - self.root_process_node_id = None - self.yarn_application_id = None - self.type = 'Scope' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py deleted file mode 100644 index fec138b82ac7..000000000000 --- 
a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ScopeJobResource(Model): - """The Scope job resources. (Only for use internally with Scope job type.). - - :param name: The name of the resource. - :type name: str - :param path: The path to the resource. - :type path: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'path': {'key': 'path', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(ScopeJobResource, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.path = kwargs.get('path', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py deleted file mode 100644 index 731eb3bf417a..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/scope_job_resource_py3.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class ScopeJobResource(Model): - """The Scope job resources. (Only for use internally with Scope job type.). - - :param name: The name of the resource. - :type name: str - :param path: The path to the resource. - :type path: str - """ - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'path': {'key': 'path', 'type': 'str'}, - } - - def __init__(self, *, name: str=None, path: str=None, **kwargs) -> None: - super(ScopeJobResource, self).__init__(**kwargs) - self.name = name - self.path = path diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py deleted file mode 100644 index 9bbfe71de463..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
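ScopeJobResource, removed above in both its Python 2 and Python 3 forms, is a plain two-field model that the docstring flags as internal to the Scope job type. A construction sketch matching the keyword-only `__init__` shown above; the resource name and adl:// path are illustrative only:

    from azure.mgmt.datalake.analytics.job.models import ScopeJobResource

    # Both fields are optional keyword arguments; the wire keys are
    # 'name' and 'path', per the _attribute_map above.
    res = ScopeJobResource(
        name='helper.dll',
        path='adl://myaccount.azuredatalakestore.net/resources/helper.dll')
    print(res.name, res.path)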
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateJobParameters(Model): - """The parameters that can be used to update existing Data Lake Analytics job - information properties. (Only for use internally with Scope job type.). - - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :param tags: The key-value pairs used to add additional metadata to the - job information. - :type tags: dict[str, str] - """ - - _attribute_map = { - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, **kwargs): - super(UpdateJobParameters, self).__init__(**kwargs) - self.degree_of_parallelism = kwargs.get('degree_of_parallelism', None) - self.priority = kwargs.get('priority', None) - self.tags = kwargs.get('tags', None) diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py deleted file mode 100644 index f06fe9385c3b..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/update_job_parameters_py3.py +++ /dev/null @@ -1,41 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class UpdateJobParameters(Model): - """The parameters that can be used to update existing Data Lake Analytics job - information properties. (Only for use internally with Scope job type.). - - :param degree_of_parallelism: The degree of parallelism used for this job. - This must be greater than 0, if set to less than 0 it will default to 1. - :type degree_of_parallelism: int - :param priority: The priority value for the current job. Lower numbers - have a higher priority. By default, a job has a priority of 1000. This - must be greater than 0. - :type priority: int - :param tags: The key-value pairs used to add additional metadata to the - job information. 
- :type tags: dict[str, str] - """ - - _attribute_map = { - 'degree_of_parallelism': {'key': 'degreeOfParallelism', 'type': 'int'}, - 'priority': {'key': 'priority', 'type': 'int'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self, *, degree_of_parallelism: int=None, priority: int=None, tags=None, **kwargs) -> None: - super(UpdateJobParameters, self).__init__(**kwargs) - self.degree_of_parallelism = degree_of_parallelism - self.priority = priority - self.tags = tags diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py deleted file mode 100644 index fe9b57ec64ca..000000000000 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties import JobProperties - - -class USqlJobProperties(JobProperties): - """U-SQL job properties used when retrieving U-SQL jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar resources: The list of resources that are required by the job. - :vartype resources: - list[~azure.mgmt.datalake.analytics.job.models.JobResource] - :param statistics: The job specific statistics. - :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics - :param debug_data: The job specific debug data locations. - :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath - :ivar diagnostics: The diagnostics for the job. - :vartype diagnostics: - list[~azure.mgmt.datalake.analytics.job.models.Diagnostics] - :ivar algebra_file_path: The algebra file path after the job has - completed. - :vartype algebra_file_path: str - :ivar total_compilation_time: The total time this job spent compiling. - This value should not be set by the user and will be ignored if it is. - :vartype total_compilation_time: timedelta - :ivar total_queued_time: The total time this job spent queued. This value - should not be set by the user and will be ignored if it is. - :vartype total_queued_time: timedelta - :ivar total_running_time: The total time this job spent executing. This - value should not be set by the user and will be ignored if it is. - :vartype total_running_time: timedelta - :ivar total_paused_time: The total time this job spent paused. This value - should not be set by the user and will be ignored if it is. 
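UpdateJobParameters (both variants deleted above) pairs with JobOperations.update, which the generated client exposes as a long-running operation (note the 200/201/202 handling in `_update_initial` later in this diff). A hedged sketch reusing the `client` and `job_id` placeholders from the statistics example; the poller behavior assumes the usual msrest `polling=True` default:

    from azure.mgmt.datalake.analytics.job.models import UpdateJobParameters

    # Both numeric fields must be greater than 0, per the docstring above.
    params = UpdateJobParameters(
        degree_of_parallelism=4,
        priority=100,
        tags={'team': 'analytics'})
    poller = client.job.update('myaccount', job_id, parameters=params)
    job = poller.result()   # final JobInformation once the update completes
    print(job.degree_of_parallelism, job.priority)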
- :vartype total_paused_time: timedelta - :ivar root_process_node_id: The ID used to identify the job manager - coordinating job execution. This value should not be set by the user and - will be ignored if it is. - :vartype root_process_node_id: str - :ivar yarn_application_id: The ID used to identify the yarn application - executing the job. This value should not be set by the user and will be - ignored if it is. - :vartype yarn_application_id: str - :ivar yarn_application_time_stamp: The timestamp (in ticks) for the yarn - application executing the job. This value should not be set by the user - and will be ignored if it is. - :vartype yarn_application_time_stamp: long - :ivar compile_mode: The specific compilation mode for the job used during - execution. If this is not specified during submission, the server will - determine the optimal compilation mode. Possible values include: - 'Semantic', 'Full', 'SingleBox' - :vartype compile_mode: str or - ~azure.mgmt.datalake.analytics.job.models.CompileMode - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'resources': {'readonly': True}, - 'diagnostics': {'readonly': True}, - 'algebra_file_path': {'readonly': True}, - 'total_compilation_time': {'readonly': True}, - 'total_queued_time': {'readonly': True}, - 'total_running_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, - 'root_process_node_id': {'readonly': True}, - 'yarn_application_id': {'readonly': True}, - 'yarn_application_time_stamp': {'readonly': True}, - 'compile_mode': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[JobResource]'}, - 'statistics': {'key': 'statistics', 'type': 'JobStatistics'}, - 'debug_data': {'key': 'debugData', 'type': 'JobDataPath'}, - 'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'}, - 'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'}, - 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, - 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, - 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, - 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, - 'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'}, - 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, - } - - def __init__(self, **kwargs): - super(USqlJobProperties, self).__init__(**kwargs) - self.resources = None - self.statistics = kwargs.get('statistics', None) - self.debug_data = kwargs.get('debug_data', None) - self.diagnostics = None - self.algebra_file_path = None - self.total_compilation_time = None - self.total_queued_time = None - self.total_running_time = None - self.total_paused_time = None - self.root_process_node_id = None - self.yarn_application_id = None - self.yarn_application_time_stamp = None - self.compile_mode = None - self.type = 'USql' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py deleted file mode 100644 index 9cfcbb25799a..000000000000 --- 
a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/models/usql_job_properties_py3.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from .job_properties_py3 import JobProperties - - -class USqlJobProperties(JobProperties): - """U-SQL job properties used when retrieving U-SQL jobs. - - Variables are only populated by the server, and will be ignored when - sending a request. - - All required parameters must be populated in order to send to Azure. - - :param runtime_version: The runtime version of the Data Lake Analytics - engine to use for the specific type of job being run. - :type runtime_version: str - :param script: Required. The script to run. Please note that the maximum - script size is 3 MB. - :type script: str - :param type: Required. Constant filled by server. - :type type: str - :ivar resources: The list of resources that are required by the job. - :vartype resources: - list[~azure.mgmt.datalake.analytics.job.models.JobResource] - :param statistics: The job specific statistics. - :type statistics: ~azure.mgmt.datalake.analytics.job.models.JobStatistics - :param debug_data: The job specific debug data locations. - :type debug_data: ~azure.mgmt.datalake.analytics.job.models.JobDataPath - :ivar diagnostics: The diagnostics for the job. - :vartype diagnostics: - list[~azure.mgmt.datalake.analytics.job.models.Diagnostics] - :ivar algebra_file_path: The algebra file path after the job has - completed. - :vartype algebra_file_path: str - :ivar total_compilation_time: The total time this job spent compiling. - This value should not be set by the user and will be ignored if it is. - :vartype total_compilation_time: timedelta - :ivar total_queued_time: The total time this job spent queued. This value - should not be set by the user and will be ignored if it is. - :vartype total_queued_time: timedelta - :ivar total_running_time: The total time this job spent executing. This - value should not be set by the user and will be ignored if it is. - :vartype total_running_time: timedelta - :ivar total_paused_time: The total time this job spent paused. This value - should not be set by the user and will be ignored if it is. - :vartype total_paused_time: timedelta - :ivar root_process_node_id: The ID used to identify the job manager - coordinating job execution. This value should not be set by the user and - will be ignored if it is. - :vartype root_process_node_id: str - :ivar yarn_application_id: The ID used to identify the yarn application - executing the job. This value should not be set by the user and will be - ignored if it is. - :vartype yarn_application_id: str - :ivar yarn_application_time_stamp: The timestamp (in ticks) for the yarn - application executing the job. This value should not be set by the user - and will be ignored if it is. - :vartype yarn_application_time_stamp: long - :ivar compile_mode: The specific compilation mode for the job used during - execution. If this is not specified during submission, the server will - determine the optimal compilation mode. 
Possible values include: - 'Semantic', 'Full', 'SingleBox' - :vartype compile_mode: str or - ~azure.mgmt.datalake.analytics.job.models.CompileMode - """ - - _validation = { - 'script': {'required': True}, - 'type': {'required': True}, - 'resources': {'readonly': True}, - 'diagnostics': {'readonly': True}, - 'algebra_file_path': {'readonly': True}, - 'total_compilation_time': {'readonly': True}, - 'total_queued_time': {'readonly': True}, - 'total_running_time': {'readonly': True}, - 'total_paused_time': {'readonly': True}, - 'root_process_node_id': {'readonly': True}, - 'yarn_application_id': {'readonly': True}, - 'yarn_application_time_stamp': {'readonly': True}, - 'compile_mode': {'readonly': True}, - } - - _attribute_map = { - 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, - 'script': {'key': 'script', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'resources': {'key': 'resources', 'type': '[JobResource]'}, - 'statistics': {'key': 'statistics', 'type': 'JobStatistics'}, - 'debug_data': {'key': 'debugData', 'type': 'JobDataPath'}, - 'diagnostics': {'key': 'diagnostics', 'type': '[Diagnostics]'}, - 'algebra_file_path': {'key': 'algebraFilePath', 'type': 'str'}, - 'total_compilation_time': {'key': 'totalCompilationTime', 'type': 'duration'}, - 'total_queued_time': {'key': 'totalQueuedTime', 'type': 'duration'}, - 'total_running_time': {'key': 'totalRunningTime', 'type': 'duration'}, - 'total_paused_time': {'key': 'totalPausedTime', 'type': 'duration'}, - 'root_process_node_id': {'key': 'rootProcessNodeId', 'type': 'str'}, - 'yarn_application_id': {'key': 'yarnApplicationId', 'type': 'str'}, - 'yarn_application_time_stamp': {'key': 'yarnApplicationTimeStamp', 'type': 'long'}, - 'compile_mode': {'key': 'compileMode', 'type': 'CompileMode'}, - } - - def __init__(self, *, script: str, runtime_version: str=None, statistics=None, debug_data=None, **kwargs) -> None: - super(USqlJobProperties, self).__init__(runtime_version=runtime_version, script=script, **kwargs) - self.resources = None - self.statistics = statistics - self.debug_data = debug_data - self.diagnostics = None - self.algebra_file_path = None - self.total_compilation_time = None - self.total_queued_time = None - self.total_running_time = None - self.total_paused_time = None - self.root_process_node_id = None - self.yarn_application_id = None - self.yarn_application_time_stamp = None - self.compile_mode = None - self.type = 'USql' diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/__init__.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/__init__.py index 2fc119680834..83abab6a2843 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/__init__.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/__init__.py @@ -9,9 +9,9 @@ # regenerated. 
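USqlJobProperties, whose two variants are deleted above, is the retrieval-side shape: nearly every field is read-only and filled by the server. Submission instead goes through the Create* counterparts in the same models package. A sketch under that assumption, again reusing the placeholder `client`; the U-SQL script and account name are illustrative:

    import uuid

    from azure.mgmt.datalake.analytics.job.models import (
        CreateJobParameters, CreateUSqlJobProperties, JobType)

    script = ("@rows = SELECT * FROM (VALUES (1)) AS T(c); "
              "OUTPUT @rows TO '/output/rows.csv' USING Outputters.Csv();")
    parameters = CreateJobParameters(
        name='example-usql-job',
        type=JobType.usql,
        degree_of_parallelism=1,
        properties=CreateUSqlJobProperties(script=script))
    # The job identity is a client-generated GUID.
    job = client.job.create('myaccount', str(uuid.uuid4()), parameters)
    print(job.job_id, job.state)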
# -------------------------------------------------------------------------- -from .job_operations import JobOperations -from .pipeline_operations import PipelineOperations -from .recurrence_operations import RecurrenceOperations +from ._job_operations import JobOperations +from ._pipeline_operations import PipelineOperations +from ._recurrence_operations import RecurrenceOperations __all__ = [ 'JobOperations', diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_job_operations.py similarity index 93% rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_job_operations.py index ce404d09af04..da3023a2f45a 100644 --- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/job_operations.py +++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_job_operations.py @@ -21,6 +21,8 @@ class JobOperations(object): """JobOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -78,8 +80,7 @@ def list( ~azure.mgmt.datalake.analytics.job.models.JobInformationBasicPaged[~azure.mgmt.datalake.analytics.job.models.JobInformationBasic] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] @@ -111,7 +112,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -120,9 +121,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -132,12 +137,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.JobInformationBasicPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.JobInformationBasicPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.JobInformationBasicPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/jobs'} @@ -180,6 +183,7 @@ def create( # Construct headers header_parameters = {} + 
+        header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
@@ -192,9 +196,8 @@ def create(
         body_content = self._serialize.body(parameters, 'CreateJobParameters')
 
         # Construct and send request
-        request = self._client.put(url, query_parameters)
-        response = self._client.send(
-            request, header_parameters, body_content, stream=False, **operation_config)
+        request = self._client.put(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -202,7 +205,6 @@ def create(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobInformation', response)
@@ -247,7 +249,7 @@ def get(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -256,8 +258,8 @@ def get(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -265,7 +267,6 @@ def get(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobInformation', response)
@@ -294,6 +295,7 @@ def _update_initial(
 
         # Construct headers
         header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
@@ -309,9 +311,8 @@ def _update_initial(
             body_content = None
 
         # Construct and send request
-        request = self._client.patch(url, query_parameters)
-        response = self._client.send(
-            request, header_parameters, body_content, stream=False, **operation_config)
+        request = self._client.patch(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200, 201, 202]:
             exp = CloudError(response)
@@ -417,7 +418,7 @@ def get_statistics(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -426,8 +427,8 @@ def get_statistics(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -435,7 +436,6 @@ def get_statistics(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobStatistics', response)
@@ -481,7 +481,7 @@ def get_debug_data_path(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -490,8 +490,8 @@ def get_debug_data_path(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -499,7 +499,6 @@ def get_debug_data_path(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobDataPath', response)
@@ -528,7 +527,6 @@ def _cancel_initial(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -537,8 +535,8 @@ def _cancel_initial(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200, 202, 204]:
             exp = CloudError(response)
@@ -610,7 +608,6 @@ def _yield_method_initial(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -619,8 +616,8 @@ def _yield_method_initial(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200, 202, 204]:
             exp = CloudError(response)
@@ -711,6 +708,7 @@ def build(
 
         # Construct headers
         header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
@@ -723,9 +721,8 @@ def build(
         body_content = self._serialize.body(parameters, 'BuildJobParameters')
 
         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(
-            request, header_parameters, body_content, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -733,7 +730,6 @@ def build(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobInformation', response)
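The job-operations changes above are mechanical and repeat per method: an explicit `Accept: application/json` header is emitted on every operation that deserializes a response body, `Content-Type` is dropped from bodiless GET/POST calls, and headers plus body are attached when the request is built rather than passed to `send`. Nothing changes for callers; a job submission still looks like the sketch below (the credentials, account name, and U-SQL script are placeholders, not values taken from this diff):

    import uuid

    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.datalake.analytics.job import DataLakeAnalyticsJobManagementClient
    from azure.mgmt.datalake.analytics.job.models import (
        CreateJobParameters, CreateUSqlJobProperties, JobType)

    # Placeholder service-principal values -- substitute your own.
    credentials = ServicePrincipalCredentials(
        client_id='<app-id>', secret='<secret>', tenant='<tenant-id>')

    # The second argument is the ADLA job DNS suffix, not a full URL.
    client = DataLakeAnalyticsJobManagementClient(
        credentials, 'azuredatalakeanalytics.net')

    parameters = CreateJobParameters(
        name='example-usql-job',
        type=JobType.usql,
        degree_of_parallelism=1,
        properties=CreateUSqlJobProperties(
            script='@rows = SELECT * FROM (VALUES (1)) AS T(x); '
                   'OUTPUT @rows TO "/output/rows.csv" USING Outputters.Csv();'))

    # create() serializes parameters as CreateJobParameters and deserializes
    # the 200 response into JobInformation, matching the hunks above.
    job = client.job.create('<adla-account-name>', uuid.uuid4(), parameters)
    print(job.job_id, job.state)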
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/pipeline_operations.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_pipeline_operations.py
similarity index 90%
rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/pipeline_operations.py
rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_pipeline_operations.py
index 42f4963ea1cf..7fc269401383 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/pipeline_operations.py
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_pipeline_operations.py
@@ -19,6 +19,8 @@ class PipelineOperations(object):
     """PipelineOperations operations.
 
+    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
+
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -62,8 +64,7 @@ def list(
           ~azure.mgmt.datalake.analytics.job.models.JobPipelineInformationPaged[~azure.mgmt.datalake.analytics.job.models.JobPipelineInformation]
         :raises: :class:`CloudError`
         """
-        def internal_paging(next_link=None, raw=False):
-
+        def prepare_request(next_link=None):
             if not next_link:
                 # Construct URL
                 url = self.list.metadata['url']
@@ -87,7 +88,7 @@ def internal_paging(next_link=None, raw=False):
 
             # Construct headers
             header_parameters = {}
-            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+            header_parameters['Accept'] = 'application/json'
             if self.config.generate_client_request_id:
                 header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
             if custom_headers:
@@ -96,9 +97,13 @@ def internal_paging(next_link=None, raw=False):
                 header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
             # Construct and send request
-            request = self._client.get(url, query_parameters)
-            response = self._client.send(
-                request, header_parameters, stream=False, **operation_config)
+            request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def internal_paging(next_link=None):
+            request = prepare_request(next_link)
+
+            response = self._client.send(request, stream=False, **operation_config)
 
             if response.status_code not in [200]:
                 exp = CloudError(response)
@@ -108,12 +113,10 @@ def internal_paging(next_link=None, raw=False):
             return response
 
         # Deserialize response
-        deserialized = models.JobPipelineInformationPaged(internal_paging, self._deserialize.dependencies)
-
+        header_dict = None
         if raw:
             header_dict = {}
-            client_raw_response = models.JobPipelineInformationPaged(internal_paging, self._deserialize.dependencies, header_dict)
-            return client_raw_response
+        deserialized = models.JobPipelineInformationPaged(internal_paging, self._deserialize.dependencies, header_dict)
 
         return deserialized
     list.metadata = {'url': '/pipelines'}
@@ -165,7 +168,7 @@ def get(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -174,8 +177,8 @@ def get(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -183,7 +186,6 @@ def get(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobPipelineInformation', response)
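In the pipeline operations, the old `internal_paging(next_link, raw)` closure both built and sent each page's request; it is now split into `prepare_request`, which only constructs the request (following the service-provided `next_link` verbatim for subsequent pages), and a thinner `internal_paging` that sends it, while the `Paged` model is constructed once and receives the optional `header_dict` directly instead of being built twice for the raw case. A condensed, framework-free sketch of that shape, using `requests` instead of msrest purely for illustration:

    import requests

    def iter_pages(session, first_url):
        """Yield items across pages, separating request prep from sending."""

        def prepare_request(next_link=None):
            # First page hits the operation URL; later pages follow the
            # service-provided next link verbatim.
            url = next_link or first_url
            return requests.Request('GET', url, headers={'Accept': 'application/json'})

        def fetch_page(next_link=None):
            prepared = session.prepare_request(prepare_request(next_link))
            response = session.send(prepared)
            response.raise_for_status()
            return response.json()  # expected shape: {'value': [...], 'nextLink': '...'}

        next_link = None
        while True:
            page = fetch_page(next_link)
            for item in page.get('value', []):
                yield item
            next_link = page.get('nextLink')
            if not next_link:
                break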
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_recurrence_operations.py
similarity index 90%
rename from sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py
rename to sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_recurrence_operations.py
index 7366b03bd0e0..5b33d7559e42 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/recurrence_operations.py
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/job/operations/_recurrence_operations.py
@@ -19,6 +19,8 @@ class RecurrenceOperations(object):
     """RecurrenceOperations operations.
 
+    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
+
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -62,8 +64,7 @@ def list(
           ~azure.mgmt.datalake.analytics.job.models.JobRecurrenceInformationPaged[~azure.mgmt.datalake.analytics.job.models.JobRecurrenceInformation]
         :raises: :class:`CloudError`
         """
-        def internal_paging(next_link=None, raw=False):
-
+        def prepare_request(next_link=None):
             if not next_link:
                 # Construct URL
                 url = self.list.metadata['url']
@@ -87,7 +88,7 @@ def internal_paging(next_link=None, raw=False):
 
             # Construct headers
             header_parameters = {}
-            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+            header_parameters['Accept'] = 'application/json'
             if self.config.generate_client_request_id:
                 header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
             if custom_headers:
@@ -96,9 +97,13 @@ def internal_paging(next_link=None, raw=False):
                 header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
             # Construct and send request
-            request = self._client.get(url, query_parameters)
-            response = self._client.send(
-                request, header_parameters, stream=False, **operation_config)
+            request = self._client.get(url, query_parameters, header_parameters)
+            return request
+
+        def internal_paging(next_link=None):
+            request = prepare_request(next_link)
+
+            response = self._client.send(request, stream=False, **operation_config)
 
             if response.status_code not in [200]:
                 exp = CloudError(response)
@@ -108,12 +113,10 @@ def internal_paging(next_link=None, raw=False):
             return response
 
         # Deserialize response
-        deserialized = models.JobRecurrenceInformationPaged(internal_paging, self._deserialize.dependencies)
-
+        header_dict = None
         if raw:
             header_dict = {}
-            client_raw_response = models.JobRecurrenceInformationPaged(internal_paging, self._deserialize.dependencies, header_dict)
-            return client_raw_response
+        deserialized = models.JobRecurrenceInformationPaged(internal_paging, self._deserialize.dependencies, header_dict)
 
         return deserialized
     list.metadata = {'url': '/recurrences'}
@@ -165,7 +168,7 @@ def get(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -174,8 +177,8 @@ def get(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
@@ -183,7 +186,6 @@ def get(
             raise exp
 
         deserialized = None
-
         if response.status_code == 200:
             deserialized = self._deserialize('JobRecurrenceInformation', response)
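The recurrence operations receive the exact analogue of the pipeline changes, so consumption is unchanged for both. A short usage sketch, reusing the placeholder `client` and account name from the job-submission sketch above:

    # Iterate all recurrences, then fetch one by its ID. Both calls now send
    # Accept: application/json and no Content-Type, per the hunks above.
    for recurrence in client.recurrence.list('<adla-account-name>'):
        print(recurrence.recurrence_id, recurrence.recurrence_name)

    details = client.recurrence.get('<adla-account-name>', recurrence.recurrence_id)
    print(details.num_jobs_succeeded)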
diff --git a/sdk/datalake/azure-mgmt-datalake-analytics/setup.py b/sdk/datalake/azure-mgmt-datalake-analytics/setup.py
index 68f340388091..4ba2e4e83639 100644
--- a/sdk/datalake/azure-mgmt-datalake-analytics/setup.py
+++ b/sdk/datalake/azure-mgmt-datalake-analytics/setup.py
@@ -53,6 +53,7 @@
     version=version,
     description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
     long_description=readme + '\n\n' + history,
+    long_description_content_type='text/x-rst',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
@@ -63,7 +64,6 @@
         'Programming Language :: Python :: 2',
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.4',
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
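setup.py now declares `long_description_content_type='text/x-rst'`, which newer setuptools and twine need for PyPI to render the reST readme, and drops the `Python :: 3.4` classifier to match the README's supported-version list. One way to sanity-check the built metadata, assuming a wheel has already been built into `dist/` (the filename below is a placeholder for whatever `bdist_wheel` actually produced):

    from email import message_from_string
    import zipfile

    # Read the METADATA file out of the built wheel.
    with zipfile.ZipFile('dist/azure_mgmt_datalake_analytics-0.6.0-py2.py3-none-any.whl') as wheel:
        name = next(n for n in wheel.namelist() if n.endswith('METADATA'))
        metadata = message_from_string(wheel.read(name).decode('utf-8'))

    # long_description_content_type surfaces as Description-Content-Type,
    # and each classifier as its own Classifier field.
    assert metadata['Description-Content-Type'] == 'text/x-rst'
    assert 'Programming Language :: Python :: 3.4' not in metadata.get_all('Classifier')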