diff --git a/sdk/datafactory/azure-mgmt-datafactory/_meta.json b/sdk/datafactory/azure-mgmt-datafactory/_meta.json index 9d25a2b50221..8dce3b01039e 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/_meta.json +++ b/sdk/datafactory/azure-mgmt-datafactory/_meta.json @@ -4,7 +4,7 @@ "@autorest/python@5.8.4", "@autorest/modelerfour@4.19.2" ], - "commit": "2ce915398bfadd5333820487595a9623187dcb59", + "commit": "ee728155b49d4c684b267354cddafd08324894b4", "repository_url": "https://github.com/Azure/azure-rest-api-specs", "autorest_command": "autorest specification/datafactory/resource-manager/readme.md --multiapi --python --python-mode=update --python-sdks-folder=/home/vsts/work/1/s/azure-sdk-for-python/sdk --track2 --use=@autorest/python@5.8.4 --use=@autorest/modelerfour@4.19.2 --version=3.4.5", "readme": "specification/datafactory/resource-manager/readme.md" diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py index 48944bf3938a..c47f66669f1b 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/_version.py @@ -6,4 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "2.0.0" +VERSION = "1.0.0" diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py index d05e600bf0b6..d9c701f986d4 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/__init__.py @@ -196,22 +196,16 @@ from ._models_py3 import DatabricksSparkJarActivity from ._models_py3 import DatabricksSparkPythonActivity from ._models_py3 import Dataset - from ._models_py3 import DatasetBZip2Compression from ._models_py3 import DatasetCompression from ._models_py3 import DatasetDataElement from ._models_py3 import DatasetDebugResource - from ._models_py3 import DatasetDeflateCompression from ._models_py3 import DatasetFolder - from ._models_py3 import DatasetGZipCompression from ._models_py3 import DatasetListResponse from ._models_py3 import DatasetLocation from ._models_py3 import DatasetReference from ._models_py3 import DatasetResource from ._models_py3 import DatasetSchemaDataElement from ._models_py3 import DatasetStorageFormat - from ._models_py3 import DatasetTarCompression - from ._models_py3 import DatasetTarGZipCompression - from ._models_py3 import DatasetZipDeflateCompression from ._models_py3 import Db2LinkedService from ._models_py3 import Db2Source from ._models_py3 import Db2TableDataset @@ -279,6 +273,7 @@ from ._models_py3 import FileSystemSink from ._models_py3 import FileSystemSource from ._models_py3 import FilterActivity + from ._models_py3 import Flowlet from ._models_py3 import ForEachActivity from ._models_py3 import FormatReadSettings from ._models_py3 import FormatWriteSettings @@ -891,22 +886,16 @@ from ._models import DatabricksSparkJarActivity # type: ignore from ._models import DatabricksSparkPythonActivity # type: ignore from ._models import Dataset # type: ignore - from ._models import DatasetBZip2Compression # type: ignore from ._models import DatasetCompression # type: ignore from ._models import DatasetDataElement # type: ignore from ._models 
import DatasetDebugResource # type: ignore - from ._models import DatasetDeflateCompression # type: ignore from ._models import DatasetFolder # type: ignore - from ._models import DatasetGZipCompression # type: ignore from ._models import DatasetListResponse # type: ignore from ._models import DatasetLocation # type: ignore from ._models import DatasetReference # type: ignore from ._models import DatasetResource # type: ignore from ._models import DatasetSchemaDataElement # type: ignore from ._models import DatasetStorageFormat # type: ignore - from ._models import DatasetTarCompression # type: ignore - from ._models import DatasetTarGZipCompression # type: ignore - from ._models import DatasetZipDeflateCompression # type: ignore from ._models import Db2LinkedService # type: ignore from ._models import Db2Source # type: ignore from ._models import Db2TableDataset # type: ignore @@ -974,6 +963,7 @@ from ._models import FileSystemSink # type: ignore from ._models import FileSystemSource # type: ignore from ._models import FilterActivity # type: ignore + from ._models import Flowlet # type: ignore from ._models import ForEachActivity # type: ignore from ._models import FormatReadSettings # type: ignore from ._models import FormatWriteSettings # type: ignore @@ -1685,22 +1675,16 @@ 'DatabricksSparkJarActivity', 'DatabricksSparkPythonActivity', 'Dataset', - 'DatasetBZip2Compression', 'DatasetCompression', 'DatasetDataElement', 'DatasetDebugResource', - 'DatasetDeflateCompression', 'DatasetFolder', - 'DatasetGZipCompression', 'DatasetListResponse', 'DatasetLocation', 'DatasetReference', 'DatasetResource', 'DatasetSchemaDataElement', 'DatasetStorageFormat', - 'DatasetTarCompression', - 'DatasetTarGZipCompression', - 'DatasetZipDeflateCompression', 'Db2LinkedService', 'Db2Source', 'Db2TableDataset', @@ -1768,6 +1752,7 @@ 'FileSystemSink', 'FileSystemSource', 'FilterActivity', + 'Flowlet', 'ForEachActivity', 'FormatReadSettings', 'FormatWriteSettings', diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py index 5f4f0c460837..1f446f8f3c60 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models.py @@ -11051,7 +11051,7 @@ class DataFlow(msrest.serialization.Model): """Azure Data Factory nested object which contains a flow with data movements and transformations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MappingDataFlow, WranglingDataFlow. + sub-classes are: Flowlet, MappingDataFlow, WranglingDataFlow. All required parameters must be populated in order to send to Azure. @@ -11078,7 +11078,7 @@ class DataFlow(msrest.serialization.Model): } _subtype_map = { - 'type': {'MappingDataFlow': 'MappingDataFlow', 'WranglingDataFlow': 'WranglingDataFlow'} + 'type': {'Flowlet': 'Flowlet', 'MappingDataFlow': 'MappingDataFlow', 'WranglingDataFlow': 'WranglingDataFlow'} } def __init__( @@ -11190,6 +11190,8 @@ class DataFlowDebugPackage(msrest.serialization.Model): :type session_id: str :param data_flow: Data flow instance. :type data_flow: ~azure.mgmt.datafactory.models.DataFlowDebugResource + :param data_flows: List of Data flows. + :type data_flows: list[~azure.mgmt.datafactory.models.DataFlowDebugResource] :param datasets: List of datasets. 
:type datasets: list[~azure.mgmt.datafactory.models.DatasetDebugResource] :param linked_services: List of linked services. @@ -11204,6 +11206,7 @@ class DataFlowDebugPackage(msrest.serialization.Model): 'additional_properties': {'key': '', 'type': '{object}'}, 'session_id': {'key': 'sessionId', 'type': 'str'}, 'data_flow': {'key': 'dataFlow', 'type': 'DataFlowDebugResource'}, + 'data_flows': {'key': 'dataFlows', 'type': '[DataFlowDebugResource]'}, 'datasets': {'key': 'datasets', 'type': '[DatasetDebugResource]'}, 'linked_services': {'key': 'linkedServices', 'type': '[LinkedServiceDebugResource]'}, 'staging': {'key': 'staging', 'type': 'DataFlowStagingInfo'}, @@ -11218,6 +11221,7 @@ def __init__( self.additional_properties = kwargs.get('additional_properties', None) self.session_id = kwargs.get('session_id', None) self.data_flow = kwargs.get('data_flow', None) + self.data_flows = kwargs.get('data_flows', None) self.datasets = kwargs.get('datasets', None) self.linked_services = kwargs.get('linked_services', None) self.staging = kwargs.get('staging', None) @@ -11496,6 +11500,8 @@ class Transformation(msrest.serialization.Model): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference """ _validation = { @@ -11505,6 +11511,7 @@ class Transformation(msrest.serialization.Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, } def __init__( @@ -11514,6 +11521,7 @@ def __init__( super(Transformation, self).__init__(**kwargs) self.name = kwargs['name'] self.description = kwargs.get('description', None) + self.flowlet = kwargs.get('flowlet', None) class DataFlowSink(Transformation): @@ -11525,6 +11533,8 @@ class DataFlowSink(Transformation): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. @@ -11540,6 +11550,7 @@ class DataFlowSink(Transformation): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -11564,6 +11575,8 @@ class DataFlowSource(Transformation): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. 
@@ -11579,6 +11592,7 @@ class DataFlowSource(Transformation): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -11733,17 +11747,17 @@ def __init__( class DatasetCompression(msrest.serialization.Model): """The compression method used on a dataset. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DatasetBZip2Compression, DatasetDeflateCompression, DatasetGZipCompression, DatasetTarCompression, DatasetTarGZipCompression, DatasetZipDeflateCompression. - All required parameters must be populated in order to send to Azure. :param additional_properties: Unmatched properties from the message are deserialized to this collection. :type additional_properties: dict[str, any] :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. + string). :type type: any + :param level: The dataset compression level. Type: string (or Expression with resultType + string). + :type level: any """ _validation = { @@ -11753,10 +11767,7 @@ class DatasetCompression(msrest.serialization.Model): _attribute_map = { 'additional_properties': {'key': '', 'type': '{object}'}, 'type': {'key': 'type', 'type': 'object'}, - } - - _subtype_map = { - 'type': {'BZip2': 'DatasetBZip2Compression', 'Deflate': 'DatasetDeflateCompression', 'GZip': 'DatasetGZipCompression', 'Tar': 'DatasetTarCompression', 'TarGZip': 'DatasetTarGZipCompression', 'ZipDeflate': 'DatasetZipDeflateCompression'} + 'level': {'key': 'level', 'type': 'object'}, } def __init__( @@ -11765,37 +11776,8 @@ def __init__( ): super(DatasetCompression, self).__init__(**kwargs) self.additional_properties = kwargs.get('additional_properties', None) - self.type = 'DatasetCompression' # type: str - - -class DatasetBZip2Compression(DatasetCompression): - """The BZip2 compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetBZip2Compression, self).__init__(**kwargs) - self.type = 'BZip2' # type: str + self.type = kwargs['type'] + self.level = kwargs.get('level', None) class DatasetDataElement(msrest.serialization.Model): @@ -11849,40 +11831,6 @@ def __init__( self.properties = kwargs['properties'] -class DatasetDeflateCompression(DatasetCompression): - """The Deflate compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. 
Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The Deflate compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetDeflateCompression, self).__init__(**kwargs) - self.type = 'Deflate' # type: str - self.level = kwargs.get('level', None) - - class DatasetFolder(msrest.serialization.Model): """The folder that this Dataset is in. If not specified, Dataset will appear at the root level. @@ -11902,40 +11850,6 @@ def __init__( self.name = kwargs.get('name', None) -class DatasetGZipCompression(DatasetCompression): - """The GZip compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The GZip compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetGZipCompression, self).__init__(**kwargs) - self.type = 'GZip' # type: str - self.level = kwargs.get('level', None) - - class DatasetListResponse(msrest.serialization.Model): """A list of dataset resources. @@ -12073,104 +11987,6 @@ def __init__( self.type = kwargs.get('type', None) -class DatasetTarCompression(DatasetCompression): - """The Tar archive method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetTarCompression, self).__init__(**kwargs) - self.type = 'Tar' # type: str - - -class DatasetTarGZipCompression(DatasetCompression): - """The TarGZip compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The TarGZip compression level. 
- :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetTarGZipCompression, self).__init__(**kwargs) - self.type = 'TarGZip' # type: str - self.level = kwargs.get('level', None) - - -class DatasetZipDeflateCompression(DatasetCompression): - """The ZipDeflate compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The ZipDeflate compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - **kwargs - ): - super(DatasetZipDeflateCompression, self).__init__(**kwargs) - self.type = 'ZipDeflate' # type: str - self.level = kwargs.get('level', None) - - class Db2LinkedService(LinkedService): """Linked service for DB2 data source. @@ -16009,6 +15825,61 @@ def __init__( self.condition = kwargs['condition'] +class Flowlet(DataFlow): + """Data flow flowlet. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Type of data flow.Constant filled by server. + :type type: str + :param description: The description of the data flow. + :type description: str + :param annotations: List of tags that can be used for describing the data flow. + :type annotations: list[any] + :param folder: The folder that this data flow is in. If not specified, Data flow will appear at + the root level. + :type folder: ~azure.mgmt.datafactory.models.DataFlowFolder + :param sources: List of sources in Flowlet. + :type sources: list[~azure.mgmt.datafactory.models.DataFlowSource] + :param sinks: List of sinks in Flowlet. + :type sinks: list[~azure.mgmt.datafactory.models.DataFlowSink] + :param transformations: List of transformations in Flowlet. + :type transformations: list[~azure.mgmt.datafactory.models.Transformation] + :param script: Flowlet script. + :type script: str + :param script_lines: Flowlet script lines. 
+ :type script_lines: list[str] + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'annotations': {'key': 'annotations', 'type': '[object]'}, + 'folder': {'key': 'folder', 'type': 'DataFlowFolder'}, + 'sources': {'key': 'typeProperties.sources', 'type': '[DataFlowSource]'}, + 'sinks': {'key': 'typeProperties.sinks', 'type': '[DataFlowSink]'}, + 'transformations': {'key': 'typeProperties.transformations', 'type': '[Transformation]'}, + 'script': {'key': 'typeProperties.script', 'type': 'str'}, + 'script_lines': {'key': 'typeProperties.scriptLines', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(Flowlet, self).__init__(**kwargs) + self.type = 'Flowlet' # type: str + self.sources = kwargs.get('sources', None) + self.sinks = kwargs.get('sinks', None) + self.transformations = kwargs.get('transformations', None) + self.script = kwargs.get('script', None) + self.script_lines = kwargs.get('script_lines', None) + + class ForEachActivity(ControlActivity): """This activity is used for iterating over a collection and execute given activities. @@ -22502,6 +22373,8 @@ class MappingDataFlow(DataFlow): :type transformations: list[~azure.mgmt.datafactory.models.Transformation] :param script: DataFlow script. :type script: str + :param script_lines: Data flow script lines. + :type script_lines: list[str] """ _validation = { @@ -22517,6 +22390,7 @@ class MappingDataFlow(DataFlow): 'sinks': {'key': 'typeProperties.sinks', 'type': '[DataFlowSink]'}, 'transformations': {'key': 'typeProperties.transformations', 'type': '[Transformation]'}, 'script': {'key': 'typeProperties.script', 'type': 'str'}, + 'script_lines': {'key': 'typeProperties.scriptLines', 'type': '[str]'}, } def __init__( @@ -22529,6 +22403,7 @@ def __init__( self.sinks = kwargs.get('sinks', None) self.transformations = kwargs.get('transformations', None) self.script = kwargs.get('script', None) + self.script_lines = kwargs.get('script_lines', None) class MariaDBLinkedService(LinkedService): @@ -27506,6 +27381,8 @@ class PowerQuerySink(DataFlowSink): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. @@ -27523,6 +27400,7 @@ class PowerQuerySink(DataFlowSink): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -27569,6 +27447,8 @@ class PowerQuerySource(DataFlowSource): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. 
@@ -27586,6 +27466,7 @@ class PowerQuerySource(DataFlowSource): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, diff --git a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models_py3.py b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models_py3.py index 963145597ea9..24d58ed22fcf 100644 --- a/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models_py3.py +++ b/sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/models/_models_py3.py @@ -12696,7 +12696,7 @@ class DataFlow(msrest.serialization.Model): """Azure Data Factory nested object which contains a flow with data movements and transformations. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MappingDataFlow, WranglingDataFlow. + sub-classes are: Flowlet, MappingDataFlow, WranglingDataFlow. All required parameters must be populated in order to send to Azure. @@ -12723,7 +12723,7 @@ class DataFlow(msrest.serialization.Model): } _subtype_map = { - 'type': {'MappingDataFlow': 'MappingDataFlow', 'WranglingDataFlow': 'WranglingDataFlow'} + 'type': {'Flowlet': 'Flowlet', 'MappingDataFlow': 'MappingDataFlow', 'WranglingDataFlow': 'WranglingDataFlow'} } def __init__( @@ -12851,6 +12851,8 @@ class DataFlowDebugPackage(msrest.serialization.Model): :type session_id: str :param data_flow: Data flow instance. :type data_flow: ~azure.mgmt.datafactory.models.DataFlowDebugResource + :param data_flows: List of Data flows. + :type data_flows: list[~azure.mgmt.datafactory.models.DataFlowDebugResource] :param datasets: List of datasets. :type datasets: list[~azure.mgmt.datafactory.models.DatasetDebugResource] :param linked_services: List of linked services. @@ -12865,6 +12867,7 @@ class DataFlowDebugPackage(msrest.serialization.Model): 'additional_properties': {'key': '', 'type': '{object}'}, 'session_id': {'key': 'sessionId', 'type': 'str'}, 'data_flow': {'key': 'dataFlow', 'type': 'DataFlowDebugResource'}, + 'data_flows': {'key': 'dataFlows', 'type': '[DataFlowDebugResource]'}, 'datasets': {'key': 'datasets', 'type': '[DatasetDebugResource]'}, 'linked_services': {'key': 'linkedServices', 'type': '[LinkedServiceDebugResource]'}, 'staging': {'key': 'staging', 'type': 'DataFlowStagingInfo'}, @@ -12877,6 +12880,7 @@ def __init__( additional_properties: Optional[Dict[str, Any]] = None, session_id: Optional[str] = None, data_flow: Optional["DataFlowDebugResource"] = None, + data_flows: Optional[List["DataFlowDebugResource"]] = None, datasets: Optional[List["DatasetDebugResource"]] = None, linked_services: Optional[List["LinkedServiceDebugResource"]] = None, staging: Optional["DataFlowStagingInfo"] = None, @@ -12887,6 +12891,7 @@ def __init__( self.additional_properties = additional_properties self.session_id = session_id self.data_flow = data_flow + self.data_flows = data_flows self.datasets = datasets self.linked_services = linked_services self.staging = staging @@ -13196,6 +13201,8 @@ class Transformation(msrest.serialization.Model): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. 
+ :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference """ _validation = { @@ -13205,6 +13212,7 @@ class Transformation(msrest.serialization.Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, } def __init__( @@ -13212,11 +13220,13 @@ def __init__( *, name: str, description: Optional[str] = None, + flowlet: Optional["DataFlowReference"] = None, **kwargs ): super(Transformation, self).__init__(**kwargs) self.name = name self.description = description + self.flowlet = flowlet class DataFlowSink(Transformation): @@ -13228,6 +13238,8 @@ class DataFlowSink(Transformation): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. @@ -13243,6 +13255,7 @@ class DataFlowSink(Transformation): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -13253,12 +13266,13 @@ def __init__( *, name: str, description: Optional[str] = None, + flowlet: Optional["DataFlowReference"] = None, dataset: Optional["DatasetReference"] = None, linked_service: Optional["LinkedServiceReference"] = None, schema_linked_service: Optional["LinkedServiceReference"] = None, **kwargs ): - super(DataFlowSink, self).__init__(name=name, description=description, **kwargs) + super(DataFlowSink, self).__init__(name=name, description=description, flowlet=flowlet, **kwargs) self.dataset = dataset self.linked_service = linked_service self.schema_linked_service = schema_linked_service @@ -13273,6 +13287,8 @@ class DataFlowSource(Transformation): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. 
@@ -13288,6 +13304,7 @@ class DataFlowSource(Transformation): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -13298,12 +13315,13 @@ def __init__( *, name: str, description: Optional[str] = None, + flowlet: Optional["DataFlowReference"] = None, dataset: Optional["DatasetReference"] = None, linked_service: Optional["LinkedServiceReference"] = None, schema_linked_service: Optional["LinkedServiceReference"] = None, **kwargs ): - super(DataFlowSource, self).__init__(name=name, description=description, **kwargs) + super(DataFlowSource, self).__init__(name=name, description=description, flowlet=flowlet, **kwargs) self.dataset = dataset self.linked_service = linked_service self.schema_linked_service = schema_linked_service @@ -13470,17 +13488,17 @@ def __init__( class DatasetCompression(msrest.serialization.Model): """The compression method used on a dataset. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DatasetBZip2Compression, DatasetDeflateCompression, DatasetGZipCompression, DatasetTarCompression, DatasetTarGZipCompression, DatasetZipDeflateCompression. - All required parameters must be populated in order to send to Azure. :param additional_properties: Unmatched properties from the message are deserialized to this collection. :type additional_properties: dict[str, any] :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. + string). :type type: any + :param level: The dataset compression level. Type: string (or Expression with resultType + string). + :type level: any """ _validation = { @@ -13490,53 +13508,21 @@ class DatasetCompression(msrest.serialization.Model): _attribute_map = { 'additional_properties': {'key': '', 'type': '{object}'}, 'type': {'key': 'type', 'type': 'object'}, - } - - _subtype_map = { - 'type': {'BZip2': 'DatasetBZip2Compression', 'Deflate': 'DatasetDeflateCompression', 'GZip': 'DatasetGZipCompression', 'Tar': 'DatasetTarCompression', 'TarGZip': 'DatasetTarGZipCompression', 'ZipDeflate': 'DatasetZipDeflateCompression'} + 'level': {'key': 'level', 'type': 'object'}, } def __init__( self, *, + type: Any, additional_properties: Optional[Dict[str, Any]] = None, + level: Optional[Any] = None, **kwargs ): super(DatasetCompression, self).__init__(**kwargs) self.additional_properties = additional_properties - self.type = 'DatasetCompression' # type: str - - -class DatasetBZip2Compression(DatasetCompression): - """The BZip2 compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. 
- :type type: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - **kwargs - ): - super(DatasetBZip2Compression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'BZip2' # type: str + self.type = type + self.level = level class DatasetDataElement(msrest.serialization.Model): @@ -13596,43 +13582,6 @@ def __init__( self.properties = properties -class DatasetDeflateCompression(DatasetCompression): - """The Deflate compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The Deflate compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - level: Optional[Any] = None, - **kwargs - ): - super(DatasetDeflateCompression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'Deflate' # type: str - self.level = level - - class DatasetFolder(msrest.serialization.Model): """The folder that this Dataset is in. If not specified, Dataset will appear at the root level. @@ -13654,43 +13603,6 @@ def __init__( self.name = name -class DatasetGZipCompression(DatasetCompression): - """The GZip compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The GZip compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - level: Optional[Any] = None, - **kwargs - ): - super(DatasetGZipCompression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'GZip' # type: str - self.level = level - - class DatasetListResponse(msrest.serialization.Model): """A list of dataset resources. @@ -13840,112 +13752,6 @@ def __init__( self.type = type -class DatasetTarCompression(DatasetCompression): - """The Tar archive method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. 
Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - **kwargs - ): - super(DatasetTarCompression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'Tar' # type: str - - -class DatasetTarGZipCompression(DatasetCompression): - """The TarGZip compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The TarGZip compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - level: Optional[Any] = None, - **kwargs - ): - super(DatasetTarGZipCompression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'TarGZip' # type: str - self.level = level - - -class DatasetZipDeflateCompression(DatasetCompression): - """The ZipDeflate compression method used on a dataset. - - All required parameters must be populated in order to send to Azure. - - :param additional_properties: Unmatched properties from the message are deserialized to this - collection. - :type additional_properties: dict[str, any] - :param type: Required. Type of dataset compression. Type: string (or Expression with resultType - string).Constant filled by server. - :type type: any - :param level: The ZipDeflate compression level. - :type level: any - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'type': {'key': 'type', 'type': 'object'}, - 'level': {'key': 'level', 'type': 'object'}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - level: Optional[Any] = None, - **kwargs - ): - super(DatasetZipDeflateCompression, self).__init__(additional_properties=additional_properties, **kwargs) - self.type = 'ZipDeflate' # type: str - self.level = level - - class Db2LinkedService(LinkedService): """Linked service for DB2 data source. @@ -18359,6 +18165,70 @@ def __init__( self.condition = condition +class Flowlet(DataFlow): + """Data flow flowlet. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Type of data flow.Constant filled by server. + :type type: str + :param description: The description of the data flow. + :type description: str + :param annotations: List of tags that can be used for describing the data flow. + :type annotations: list[any] + :param folder: The folder that this data flow is in. If not specified, Data flow will appear at + the root level. + :type folder: ~azure.mgmt.datafactory.models.DataFlowFolder + :param sources: List of sources in Flowlet. 
+ :type sources: list[~azure.mgmt.datafactory.models.DataFlowSource] + :param sinks: List of sinks in Flowlet. + :type sinks: list[~azure.mgmt.datafactory.models.DataFlowSink] + :param transformations: List of transformations in Flowlet. + :type transformations: list[~azure.mgmt.datafactory.models.Transformation] + :param script: Flowlet script. + :type script: str + :param script_lines: Flowlet script lines. + :type script_lines: list[str] + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'annotations': {'key': 'annotations', 'type': '[object]'}, + 'folder': {'key': 'folder', 'type': 'DataFlowFolder'}, + 'sources': {'key': 'typeProperties.sources', 'type': '[DataFlowSource]'}, + 'sinks': {'key': 'typeProperties.sinks', 'type': '[DataFlowSink]'}, + 'transformations': {'key': 'typeProperties.transformations', 'type': '[Transformation]'}, + 'script': {'key': 'typeProperties.script', 'type': 'str'}, + 'script_lines': {'key': 'typeProperties.scriptLines', 'type': '[str]'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + annotations: Optional[List[Any]] = None, + folder: Optional["DataFlowFolder"] = None, + sources: Optional[List["DataFlowSource"]] = None, + sinks: Optional[List["DataFlowSink"]] = None, + transformations: Optional[List["Transformation"]] = None, + script: Optional[str] = None, + script_lines: Optional[List[str]] = None, + **kwargs + ): + super(Flowlet, self).__init__(description=description, annotations=annotations, folder=folder, **kwargs) + self.type = 'Flowlet' # type: str + self.sources = sources + self.sinks = sinks + self.transformations = transformations + self.script = script + self.script_lines = script_lines + + class ForEachActivity(ControlActivity): """This activity is used for iterating over a collection and execute given activities. @@ -25763,6 +25633,8 @@ class MappingDataFlow(DataFlow): :type transformations: list[~azure.mgmt.datafactory.models.Transformation] :param script: DataFlow script. :type script: str + :param script_lines: Data flow script lines. + :type script_lines: list[str] """ _validation = { @@ -25778,6 +25650,7 @@ class MappingDataFlow(DataFlow): 'sinks': {'key': 'typeProperties.sinks', 'type': '[DataFlowSink]'}, 'transformations': {'key': 'typeProperties.transformations', 'type': '[Transformation]'}, 'script': {'key': 'typeProperties.script', 'type': 'str'}, + 'script_lines': {'key': 'typeProperties.scriptLines', 'type': '[str]'}, } def __init__( @@ -25790,6 +25663,7 @@ def __init__( sinks: Optional[List["DataFlowSink"]] = None, transformations: Optional[List["Transformation"]] = None, script: Optional[str] = None, + script_lines: Optional[List[str]] = None, **kwargs ): super(MappingDataFlow, self).__init__(description=description, annotations=annotations, folder=folder, **kwargs) @@ -25798,6 +25672,7 @@ def __init__( self.sinks = sinks self.transformations = transformations self.script = script + self.script_lines = script_lines class MariaDBLinkedService(LinkedService): @@ -31531,6 +31406,8 @@ class PowerQuerySink(DataFlowSink): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. 
@@ -31548,6 +31425,7 @@ class PowerQuerySink(DataFlowSink): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -31559,13 +31437,14 @@ def __init__( *, name: str, description: Optional[str] = None, + flowlet: Optional["DataFlowReference"] = None, dataset: Optional["DatasetReference"] = None, linked_service: Optional["LinkedServiceReference"] = None, schema_linked_service: Optional["LinkedServiceReference"] = None, script: Optional[str] = None, **kwargs ): - super(PowerQuerySink, self).__init__(name=name, description=description, dataset=dataset, linked_service=linked_service, schema_linked_service=schema_linked_service, **kwargs) + super(PowerQuerySink, self).__init__(name=name, description=description, flowlet=flowlet, dataset=dataset, linked_service=linked_service, schema_linked_service=schema_linked_service, **kwargs) self.script = script @@ -31604,6 +31483,8 @@ class PowerQuerySource(DataFlowSource): :type name: str :param description: Transformation description. :type description: str + :param flowlet: Flowlet Reference. + :type flowlet: ~azure.mgmt.datafactory.models.DataFlowReference :param dataset: Dataset reference. :type dataset: ~azure.mgmt.datafactory.models.DatasetReference :param linked_service: Linked service reference. @@ -31621,6 +31502,7 @@ class PowerQuerySource(DataFlowSource): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, + 'flowlet': {'key': 'flowlet', 'type': 'DataFlowReference'}, 'dataset': {'key': 'dataset', 'type': 'DatasetReference'}, 'linked_service': {'key': 'linkedService', 'type': 'LinkedServiceReference'}, 'schema_linked_service': {'key': 'schemaLinkedService', 'type': 'LinkedServiceReference'}, @@ -31632,13 +31514,14 @@ def __init__( *, name: str, description: Optional[str] = None, + flowlet: Optional["DataFlowReference"] = None, dataset: Optional["DatasetReference"] = None, linked_service: Optional["LinkedServiceReference"] = None, schema_linked_service: Optional["LinkedServiceReference"] = None, script: Optional[str] = None, **kwargs ): - super(PowerQuerySource, self).__init__(name=name, description=description, dataset=dataset, linked_service=linked_service, schema_linked_service=schema_linked_service, **kwargs) + super(PowerQuerySource, self).__init__(name=name, description=description, flowlet=flowlet, dataset=dataset, linked_service=linked_service, schema_linked_service=schema_linked_service, **kwargs) self.script = script
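
---

Reviewer note: this regeneration removes the per-algorithm `DatasetCompression` subclasses (`DatasetBZip2Compression`, `DatasetDeflateCompression`, `DatasetGZipCompression`, `DatasetTarCompression`, `DatasetTarGZipCompression`, `DatasetZipDeflateCompression`) in favor of a single `DatasetCompression` model whose `type` is a plain required value and whose `level` now lives on the base class. A minimal migration sketch under those assumptions (the old/new constructor calls below are illustrative, not taken from this diff):

```python
from azure.mgmt.datafactory.models import DatasetCompression

# Before this change: discriminated subclasses selected the algorithm.
#   from azure.mgmt.datafactory.models import DatasetGZipCompression
#   compression = DatasetGZipCompression(level="Optimal")

# After this change: one model; `type` is passed explicitly and `level`
# is available for algorithms that support it.
compression = DatasetCompression(type="GZip", level="Optimal")

# Algorithms without a level (e.g. Tar) simply omit it.
tar_compression = DatasetCompression(type="Tar")
```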
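The regeneration also introduces the `Flowlet` data flow type, adds `script_lines` to `MappingDataFlow` and `Flowlet`, and threads an optional `flowlet` reference (a `DataFlowReference`) through `Transformation`, `DataFlowSource`, `DataFlowSink`, `PowerQuerySource`, and `PowerQuerySink`. A rough sketch of the new surface, assuming `DataFlowReference` accepts `type` and `reference_name` (its definition is not part of this diff); all names and script lines below are placeholders:

```python
from azure.mgmt.datafactory.models import (
    DataFlowReference,
    DataFlowSink,
    DataFlowSource,
    Flowlet,
)

# A flowlet is a reusable data flow fragment; its sources, sinks and script
# body are serialized under typeProperties, mirroring MappingDataFlow.
flowlet = Flowlet(
    description="Reusable cleansing steps",
    sources=[DataFlowSource(name="inStream")],
    sinks=[DataFlowSink(name="outStream")],
    script_lines=[
        "source(output(id as string)) ~> inStream",
        "inStream sink() ~> outStream",
    ],
)

# A source (or sink) in another data flow can now point at a published
# flowlet via the new `flowlet` parameter instead of a dataset.
source = DataFlowSource(
    name="fromFlowlet",
    flowlet=DataFlowReference(type="DataFlowReference", reference_name="MyFlowlet"),
)
```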