diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDatabricksLinkedService.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDatabricksLinkedService.cs
index 1ac3e9ad74f8..76f63b301128 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDatabricksLinkedService.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureDatabricksLinkedService.cs
@@ -83,6 +83,9 @@ public AzureDatabricksLinkedService()
/// <param name="newClusterCustomTags">Additional tags for cluster
/// resources. This property is ignored in instance pool
/// configurations.</param>
+ /// <param name="newClusterLogDestination">Specify a location to
+ /// deliver Spark driver, worker, and event logs. Type: string (or
+ /// Expression with resultType string).</param>
/// <param name="newClusterDriverNodeType">The driver node type for the
/// new job cluster. This property is ignored in instance pool
/// configurations. Type: string (or Expression with resultType
@@ -99,7 +102,7 @@ public AzureDatabricksLinkedService()
/// authentication. Credentials are encrypted using the integration
/// runtime credential manager. Type: string (or Expression with
/// resultType string).</param>
- public AzureDatabricksLinkedService(object domain, SecretBase accessToken, IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), IntegrationRuntimeReference connectVia = default(IntegrationRuntimeReference), string description = default(string), IDictionary<string, ParameterSpecification> parameters = default(IDictionary<string, ParameterSpecification>), IList<object> annotations = default(IList<object>), object existingClusterId = default(object), object instancePoolId = default(object), object newClusterVersion = default(object), object newClusterNumOfWorker = default(object), object newClusterNodeType = default(object), IDictionary<string, object> newClusterSparkConf = default(IDictionary<string, object>), IDictionary<string, object> newClusterSparkEnvVars = default(IDictionary<string, object>), IDictionary<string, string> newClusterCustomTags = default(IDictionary<string, string>), object newClusterDriverNodeType = default(object), object newClusterInitScripts = default(object), object newClusterEnableElasticDisk = default(object), object encryptedCredential = default(object))
+ public AzureDatabricksLinkedService(object domain, SecretBase accessToken, IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), IntegrationRuntimeReference connectVia = default(IntegrationRuntimeReference), string description = default(string), IDictionary<string, ParameterSpecification> parameters = default(IDictionary<string, ParameterSpecification>), IList<object> annotations = default(IList<object>), object existingClusterId = default(object), object instancePoolId = default(object), object newClusterVersion = default(object), object newClusterNumOfWorker = default(object), object newClusterNodeType = default(object), IDictionary<string, object> newClusterSparkConf = default(IDictionary<string, object>), IDictionary<string, object> newClusterSparkEnvVars = default(IDictionary<string, object>), IDictionary<string, string> newClusterCustomTags = default(IDictionary<string, string>), object newClusterLogDestination = default(object), object newClusterDriverNodeType = default(object), object newClusterInitScripts = default(object), object newClusterEnableElasticDisk = default(object), object encryptedCredential = default(object))
: base(additionalProperties, connectVia, description, parameters, annotations)
{
Domain = domain;
@@ -112,6 +115,7 @@ public AzureDatabricksLinkedService()
NewClusterSparkConf = newClusterSparkConf;
NewClusterSparkEnvVars = newClusterSparkEnvVars;
NewClusterCustomTags = newClusterCustomTags;
+ NewClusterLogDestination = newClusterLogDestination;
NewClusterDriverNodeType = newClusterDriverNodeType;
NewClusterInitScripts = newClusterInitScripts;
NewClusterEnableElasticDisk = newClusterEnableElasticDisk;
@@ -209,6 +213,14 @@ public AzureDatabricksLinkedService()
[JsonProperty(PropertyName = "typeProperties.newClusterCustomTags")]
public IDictionary<string, string> NewClusterCustomTags { get; set; }
+ /// <summary>
+ /// Gets or sets specify a location to deliver Spark driver, worker,
+ /// and event logs. Type: string (or Expression with resultType
+ /// string).
+ /// </summary>
+ [JsonProperty(PropertyName = "typeProperties.newClusterLogDestination")]
+ public object NewClusterLogDestination { get; set; }
+
/// <summary>
/// Gets or sets the driver node type for the new job cluster. This
/// property is ignored in instance pool configurations. Type: string
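Illustrative usage (editor's sketch, not part of the generated diff): how the new NewClusterLogDestination property might be set when defining a Databricks linked service. The workspace URL, token, cluster settings, and the client variable are hypothetical placeholders.

    // Hypothetical example: route new-job-cluster Spark driver, worker, and event
    // logs to a DBFS folder via the new NewClusterLogDestination property.
    var databricks = new AzureDatabricksLinkedService
    {
        Domain = "https://adb-1234567890123456.7.azuredatabricks.net",
        AccessToken = new SecureString("<databricks-access-token>"),
        NewClusterVersion = "7.3.x-scala2.12",
        NewClusterNumOfWorker = "2",
        NewClusterNodeType = "Standard_DS3_v2",
        NewClusterLogDestination = "dbfs:/cluster-logs"
    };
    // Assumes an authenticated DataFactoryManagementClient named "client".
    client.LinkedServices.CreateOrUpdate(
        "myResourceGroup", "myFactory", "AzureDatabricksLS",
        new LinkedServiceResource(databricks));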
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureSqlSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureSqlSource.cs
index 7efd3dd7e98a..f0f16e36471c 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureSqlSource.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/AzureSqlSource.cs
@@ -58,13 +58,20 @@ public AzureSqlSource()
/// type: "int"}}".</param>
/// <param name="produceAdditionalTypes">Which additional types to
/// produce.</param>
- public AzureSqlSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object))
+ /// <param name="partitionOption">The partition mechanism that will be
+ /// used for Sql read in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'</param>
+ /// <param name="partitionSettings">The settings that will be leveraged
+ /// for Sql source partitioning.</param>
+ public AzureSqlSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object), string partitionOption = default(string), SqlPartitionSettings partitionSettings = default(SqlPartitionSettings))
: base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections, queryTimeout, additionalColumns)
{
SqlReaderQuery = sqlReaderQuery;
SqlReaderStoredProcedureName = sqlReaderStoredProcedureName;
StoredProcedureParameters = storedProcedureParameters;
ProduceAdditionalTypes = produceAdditionalTypes;
+ PartitionOption = partitionOption;
+ PartitionSettings = partitionSettings;
CustomInit();
}
@@ -101,5 +108,20 @@ public AzureSqlSource()
[JsonProperty(PropertyName = "produceAdditionalTypes")]
public object ProduceAdditionalTypes { get; set; }
+ /// <summary>
+ /// Gets or sets the partition mechanism that will be used for Sql read
+ /// in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionOption")]
+ public string PartitionOption { get; set; }
+
+ /// <summary>
+ /// Gets or sets the settings that will be leveraged for Sql source
+ /// partitioning.
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionSettings")]
+ public SqlPartitionSettings PartitionSettings { get; set; }
+
}
}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/DataFlowStagingInfo.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/DataFlowStagingInfo.cs
index b77877ea33a0..67e7c7e5fd5d 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/DataFlowStagingInfo.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/DataFlowStagingInfo.cs
@@ -31,8 +31,9 @@ public DataFlowStagingInfo()
/// </summary>
/// <param name="linkedService">Staging linked service
/// reference.</param>
- /// <param name="folderPath">Folder path for staging blob.</param>
- public DataFlowStagingInfo(LinkedServiceReference linkedService = default(LinkedServiceReference), string folderPath = default(string))
+ /// <param name="folderPath">Folder path for staging blob. Type: string
+ /// (or Expression with resultType string)</param>
+ public DataFlowStagingInfo(LinkedServiceReference linkedService = default(LinkedServiceReference), object folderPath = default(object))
{
LinkedService = linkedService;
FolderPath = folderPath;
@@ -51,10 +52,11 @@ public DataFlowStagingInfo()
public LinkedServiceReference LinkedService { get; set; }
/// <summary>
- /// Gets or sets folder path for staging blob.
+ /// Gets or sets folder path for staging blob. Type: string (or
+ /// Expression with resultType string)
/// </summary>
[JsonProperty(PropertyName = "folderPath")]
- public string FolderPath { get; set; }
+ public object FolderPath { get; set; }
/// <summary>
/// Validate the object.
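Illustrative note (editor's sketch, not part of the generated diff): because FolderPath is now typed as object, it can carry either a literal string or a Data Factory expression; the names below are hypothetical.

    // Hypothetical example of the widened FolderPath property.
    var staging = new DataFlowStagingInfo
    {
        LinkedService = new LinkedServiceReference("StagingBlobStorage"),
        // A literal path still works, e.g. FolderPath = "staging/dataflows";
        // an expression object (resultType string) is now accepted as well.
        FolderPath = new Expression("@concat('staging/', pipeline().RunId)")
    };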
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SsisPackageLocation.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SSISPackageLocation.cs
similarity index 100%
rename from sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SsisPackageLocation.cs
rename to sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SSISPackageLocation.cs
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapOpenHubLinkedService.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapOpenHubLinkedService.cs
index 4b53d397b7a6..9d570f5a6737 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapOpenHubLinkedService.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SapOpenHubLinkedService.cs
@@ -35,6 +35,13 @@ public SapOpenHubLinkedService()
/// <summary>
/// Initializes a new instance of the SapOpenHubLinkedService class.
/// </summary>
+ /// <param name="additionalProperties">Unmatched properties from the
+ /// message are deserialized this collection</param>
+ /// <param name="connectVia">The integration runtime reference.</param>
+ /// <param name="description">Linked service description.</param>
+ /// <param name="parameters">Parameters for linked service.</param>
+ /// <param name="annotations">List of tags that can be used for
+ /// describing the linked service.</param>
/// <param name="server">Host name of the SAP BW instance where the
/// open hub destination is located. Type: string (or Expression with
/// resultType string).</param>
@@ -46,34 +53,41 @@ public SapOpenHubLinkedService()
/// where the open hub destination is located. (Usually a three-digit
/// decimal number represented as a string) Type: string (or Expression
/// with resultType string).</param>
- /// <param name="additionalProperties">Unmatched properties from the
- /// message are deserialized this collection</param>
- /// <param name="connectVia">The integration runtime reference.</param>
- /// <param name="description">Linked service description.</param>
- /// <param name="parameters">Parameters for linked service.</param>
- /// <param name="annotations">List of tags that can be used for
- /// describing the linked service.</param>
/// <param name="language">Language of the BW system where the open hub
/// destination is located. The default value is EN. Type: string (or
/// Expression with resultType string).</param>
+ /// <param name="systemId">SystemID of the SAP system where the table
+ /// is located. Type: string (or Expression with resultType
+ /// string).</param>
/// <param name="userName">Username to access the SAP BW server where
/// the open hub destination is located. Type: string (or Expression
/// with resultType string).</param>
/// <param name="password">Password to access the SAP BW server where
/// the open hub destination is located.</param>
+ /// <param name="messageServer">The hostname of the SAP Message Server.
+ /// Type: string (or Expression with resultType string).</param>
+ /// <param name="messageServerService">The service name or port number
+ /// of the Message Server. Type: string (or Expression with resultType
+ /// string).</param>
+ /// <param name="logonGroup">The Logon Group for the SAP System. Type:
+ /// string (or Expression with resultType string).</param>
/// <param name="encryptedCredential">The encrypted credential used for
/// authentication. Credentials are encrypted using the integration
/// runtime credential manager. Type: string (or Expression with
/// resultType string).</param>
- public SapOpenHubLinkedService(object server, object systemNumber, object clientId, IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), IntegrationRuntimeReference connectVia = default(IntegrationRuntimeReference), string description = default(string), IDictionary<string, ParameterSpecification> parameters = default(IDictionary<string, ParameterSpecification>), IList<object> annotations = default(IList<object>), object language = default(object), object userName = default(object), SecretBase password = default(SecretBase), object encryptedCredential = default(object))
+ public SapOpenHubLinkedService(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), IntegrationRuntimeReference connectVia = default(IntegrationRuntimeReference), string description = default(string), IDictionary<string, ParameterSpecification> parameters = default(IDictionary<string, ParameterSpecification>), IList<object> annotations = default(IList<object>), object server = default(object), object systemNumber = default(object), object clientId = default(object), object language = default(object), object systemId = default(object), object userName = default(object), SecretBase password = default(SecretBase), object messageServer = default(object), object messageServerService = default(object), object logonGroup = default(object), object encryptedCredential = default(object))
: base(additionalProperties, connectVia, description, parameters, annotations)
{
Server = server;
SystemNumber = systemNumber;
ClientId = clientId;
Language = language;
+ SystemId = systemId;
UserName = userName;
Password = password;
+ MessageServer = messageServer;
+ MessageServerService = messageServerService;
+ LogonGroup = logonGroup;
EncryptedCredential = encryptedCredential;
CustomInit();
}
@@ -117,6 +131,13 @@ public SapOpenHubLinkedService()
[JsonProperty(PropertyName = "typeProperties.language")]
public object Language { get; set; }
+ /// <summary>
+ /// Gets or sets systemID of the SAP system where the table is located.
+ /// Type: string (or Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "typeProperties.systemId")]
+ public object SystemId { get; set; }
+
/// <summary>
/// Gets or sets username to access the SAP BW server where the open
/// hub destination is located. Type: string (or Expression with
@@ -132,6 +153,27 @@ public SapOpenHubLinkedService()
[JsonProperty(PropertyName = "typeProperties.password")]
public SecretBase Password { get; set; }
+ /// <summary>
+ /// Gets or sets the hostname of the SAP Message Server. Type: string
+ /// (or Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "typeProperties.messageServer")]
+ public object MessageServer { get; set; }
+
+ /// <summary>
+ /// Gets or sets the service name or port number of the Message Server.
+ /// Type: string (or Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "typeProperties.messageServerService")]
+ public object MessageServerService { get; set; }
+
+ /// <summary>
+ /// Gets or sets the Logon Group for the SAP System. Type: string (or
+ /// Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "typeProperties.logonGroup")]
+ public object LogonGroup { get; set; }
+
/// <summary>
/// Gets or sets the encrypted credential used for authentication.
/// Credentials are encrypted using the integration runtime credential
@@ -149,18 +191,6 @@ public SapOpenHubLinkedService()
public override void Validate()
{
base.Validate();
- if (Server == null)
- {
- throw new ValidationException(ValidationRules.CannotBeNull, "Server");
- }
- if (SystemNumber == null)
- {
- throw new ValidationException(ValidationRules.CannotBeNull, "SystemNumber");
- }
- if (ClientId == null)
- {
- throw new ValidationException(ValidationRules.CannotBeNull, "ClientId");
- }
}
}
}
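Illustrative usage (editor's sketch, not part of the generated diff): with the server, systemNumber, and clientId requirement relaxed, a SAP Open Hub linked service can be pointed at a message server and logon group instead of a single application server. All values below are hypothetical.

    // Hypothetical example using the new message-server properties.
    var sapOpenHub = new SapOpenHubLinkedService
    {
        MessageServer = "sapms.contoso.com",
        MessageServerService = "3600",
        LogonGroup = "PUBLIC",
        SystemId = "BW1",
        ClientId = "100",
        Language = "EN",
        UserName = "bwuser",
        Password = new SecureString("<password>")
    };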
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlDWSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlDWSource.cs
index 2a3fd9dba735..118d41686a6c 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlDWSource.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlDWSource.cs
@@ -57,12 +57,19 @@ public SqlDWSource()
/// stored procedure parameters. Example: "{Parameter1: {value: "1",
/// type: "int"}}". Type: object (or Expression with resultType
/// object), itemType: StoredProcedureParameter.</param>
- public SqlDWSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), object storedProcedureParameters = default(object))
+ /// <param name="partitionOption">The partition mechanism that will be
+ /// used for Sql read in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'</param>
+ /// <param name="partitionSettings">The settings that will be leveraged
+ /// for Sql source partitioning.</param>
+ public SqlDWSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), object storedProcedureParameters = default(object), string partitionOption = default(string), SqlPartitionSettings partitionSettings = default(SqlPartitionSettings))
: base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections, queryTimeout, additionalColumns)
{
SqlReaderQuery = sqlReaderQuery;
SqlReaderStoredProcedureName = sqlReaderStoredProcedureName;
StoredProcedureParameters = storedProcedureParameters;
+ PartitionOption = partitionOption;
+ PartitionSettings = partitionSettings;
CustomInit();
}
@@ -95,5 +102,20 @@ public SqlDWSource()
[JsonProperty(PropertyName = "storedProcedureParameters")]
public object StoredProcedureParameters { get; set; }
+ /// <summary>
+ /// Gets or sets the partition mechanism that will be used for Sql read
+ /// in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionOption")]
+ public string PartitionOption { get; set; }
+
+ /// <summary>
+ /// Gets or sets the settings that will be leveraged for Sql source
+ /// partitioning.
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionSettings")]
+ public SqlPartitionSettings PartitionSettings { get; set; }
+
}
}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlMISource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlMISource.cs
index 96d8c313fa39..9ce8caf7515c 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlMISource.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlMISource.cs
@@ -58,13 +58,20 @@ public SqlMISource()
/// type: "int"}}".</param>
/// <param name="produceAdditionalTypes">Which additional types to
/// produce.</param>
- public SqlMISource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object))
+ /// <param name="partitionOption">The partition mechanism that will be
+ /// used for Sql read in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'</param>
+ /// <param name="partitionSettings">The settings that will be leveraged
+ /// for Sql source partitioning.</param>
+ public SqlMISource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object), string partitionOption = default(string), SqlPartitionSettings partitionSettings = default(SqlPartitionSettings))
: base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections, queryTimeout, additionalColumns)
{
SqlReaderQuery = sqlReaderQuery;
SqlReaderStoredProcedureName = sqlReaderStoredProcedureName;
StoredProcedureParameters = storedProcedureParameters;
ProduceAdditionalTypes = produceAdditionalTypes;
+ PartitionOption = partitionOption;
+ PartitionSettings = partitionSettings;
CustomInit();
}
@@ -102,5 +109,20 @@ public SqlMISource()
[JsonProperty(PropertyName = "produceAdditionalTypes")]
public object ProduceAdditionalTypes { get; set; }
+ /// <summary>
+ /// Gets or sets the partition mechanism that will be used for Sql read
+ /// in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionOption")]
+ public string PartitionOption { get; set; }
+
+ /// <summary>
+ /// Gets or sets the settings that will be leveraged for Sql source
+ /// partitioning.
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionSettings")]
+ public SqlPartitionSettings PartitionSettings { get; set; }
+
}
}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionOption.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionOption.cs
new file mode 100644
index 000000000000..f65428093808
--- /dev/null
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionOption.cs
@@ -0,0 +1,23 @@
+// <auto-generated>
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for
+// license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+// </auto-generated>
+
+namespace Microsoft.Azure.Management.DataFactory.Models
+{
+
+ /// <summary>
+ /// Defines values for SqlPartitionOption.
+ /// </summary>
+ public static class SqlPartitionOption
+ {
+ public const string None = "None";
+ public const string PhysicalPartitionsOfTable = "PhysicalPartitionsOfTable";
+ public const string DynamicRange = "DynamicRange";
+ }
+}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionSettings.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionSettings.cs
new file mode 100644
index 000000000000..f7f7d7b3be9b
--- /dev/null
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlPartitionSettings.cs
@@ -0,0 +1,93 @@
+// <auto-generated>
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for
+// license information.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+// </auto-generated>
+
+namespace Microsoft.Azure.Management.DataFactory.Models
+{
+ using Newtonsoft.Json;
+ using System.Linq;
+
+ /// <summary>
+ /// The settings that will be leveraged for Sql source partitioning.
+ /// </summary>
+ public partial class SqlPartitionSettings
+ {
+ /// <summary>
+ /// Initializes a new instance of the SqlPartitionSettings class.
+ /// </summary>
+ public SqlPartitionSettings()
+ {
+ CustomInit();
+ }
+
+ /// <summary>
+ /// Initializes a new instance of the SqlPartitionSettings class.
+ /// </summary>
+ /// <param name="partitionColumnName">The name of the column in integer
+ /// or datetime type that will be used for proceeding partitioning. If
+ /// not specified, the primary key of the table is auto-detected and
+ /// used as the partition column. Type: string (or Expression with
+ /// resultType string).</param>
+ /// <param name="partitionUpperBound">The maximum value of the
+ /// partition column for partition range splitting. This value is used
+ /// to decide the partition stride, not for filtering the rows in
+ /// table. All rows in the table or query result will be partitioned
+ /// and copied. Type: string (or Expression with resultType
+ /// string).</param>
+ /// <param name="partitionLowerBound">The minimum value of the
+ /// partition column for partition range splitting. This value is used
+ /// to decide the partition stride, not for filtering the rows in
+ /// table. All rows in the table or query result will be partitioned
+ /// and copied. Type: string (or Expression with resultType
+ /// string).</param>
+ public SqlPartitionSettings(object partitionColumnName = default(object), object partitionUpperBound = default(object), object partitionLowerBound = default(object))
+ {
+ PartitionColumnName = partitionColumnName;
+ PartitionUpperBound = partitionUpperBound;
+ PartitionLowerBound = partitionLowerBound;
+ CustomInit();
+ }
+
+ /// <summary>
+ /// An initialization method that performs custom operations like setting defaults
+ /// </summary>
+ partial void CustomInit();
+
+ /// <summary>
+ /// Gets or sets the name of the column in integer or datetime type
+ /// that will be used for proceeding partitioning. If not specified,
+ /// the primary key of the table is auto-detected and used as the
+ /// partition column. Type: string (or Expression with resultType
+ /// string).
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionColumnName")]
+ public object PartitionColumnName { get; set; }
+
+ /// <summary>
+ /// Gets or sets the maximum value of the partition column for
+ /// partition range splitting. This value is used to decide the
+ /// partition stride, not for filtering the rows in table. All rows in
+ /// the table or query result will be partitioned and copied. Type:
+ /// string (or Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionUpperBound")]
+ public object PartitionUpperBound { get; set; }
+
+ /// <summary>
+ /// Gets or sets the minimum value of the partition column for
+ /// partition range splitting. This value is used to decide the
+ /// partition stride, not for filtering the rows in table. All rows in
+ /// the table or query result will be partitioned and copied. Type:
+ /// string (or Expression with resultType string).
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionLowerBound")]
+ public object PartitionLowerBound { get; set; }
+
+ }
+}
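Illustrative usage (editor's sketch, not part of the generated diff): how the new PartitionOption and PartitionSettings properties might be combined on a SQL copy source; the table, column, and bounds are hypothetical. The same pair of properties is added to AzureSqlSource, SqlServerSource, SqlMISource, SqlDWSource, and SqlSource.

    // Hypothetical example: read an Azure SQL table in parallel ranges.
    var source = new AzureSqlSource
    {
        PartitionOption = SqlPartitionOption.DynamicRange,
        PartitionSettings = new SqlPartitionSettings
        {
            PartitionColumnName = "OrderId",   // integer or datetime column
            PartitionLowerBound = "1",         // bounds decide the stride,
            PartitionUpperBound = "1000000"    // they do not filter rows
        }
    };
    // PartitionOption = SqlPartitionOption.PhysicalPartitionsOfTable would instead
    // parallelize over the table's existing physical partitions.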
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlServerSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlServerSource.cs
index c1cbb89d9202..fc60faec5577 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlServerSource.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlServerSource.cs
@@ -58,13 +58,20 @@ public SqlServerSource()
/// type: "int"}}".</param>
/// <param name="produceAdditionalTypes">Which additional types to
/// produce.</param>
- public SqlServerSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object))
+ /// <param name="partitionOption">The partition mechanism that will be
+ /// used for Sql read in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'</param>
+ /// <param name="partitionSettings">The settings that will be leveraged
+ /// for Sql source partitioning.</param>
+ public SqlServerSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object produceAdditionalTypes = default(object), string partitionOption = default(string), SqlPartitionSettings partitionSettings = default(SqlPartitionSettings))
: base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections, queryTimeout, additionalColumns)
{
SqlReaderQuery = sqlReaderQuery;
SqlReaderStoredProcedureName = sqlReaderStoredProcedureName;
StoredProcedureParameters = storedProcedureParameters;
ProduceAdditionalTypes = produceAdditionalTypes;
+ PartitionOption = partitionOption;
+ PartitionSettings = partitionSettings;
CustomInit();
}
@@ -101,5 +108,20 @@ public SqlServerSource()
[JsonProperty(PropertyName = "produceAdditionalTypes")]
public object ProduceAdditionalTypes { get; set; }
+ /// <summary>
+ /// Gets or sets the partition mechanism that will be used for Sql read
+ /// in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionOption")]
+ public string PartitionOption { get; set; }
+
+ /// <summary>
+ /// Gets or sets the settings that will be leveraged for Sql source
+ /// partitioning.
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionSettings")]
+ public SqlPartitionSettings PartitionSettings { get; set; }
+
}
}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlSource.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlSource.cs
index 7fab6677c3f0..3edbbb834988 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlSource.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/Models/SqlSource.cs
@@ -61,13 +61,20 @@ public SqlSource()
/// ReadCommitted/ReadUncommitted/RepeatableRead/Serializable/Snapshot.
/// The default value is ReadCommitted. Type: string (or Expression
/// with resultType string).</param>
- public SqlSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object isolationLevel = default(object))
+ /// <param name="partitionOption">The partition mechanism that will be
+ /// used for Sql read in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'</param>
+ /// <param name="partitionSettings">The settings that will be leveraged
+ /// for Sql source partitioning.</param>
+ public SqlSource(IDictionary<string, object> additionalProperties = default(IDictionary<string, object>), object sourceRetryCount = default(object), object sourceRetryWait = default(object), object maxConcurrentConnections = default(object), object queryTimeout = default(object), IList<AdditionalColumns> additionalColumns = default(IList<AdditionalColumns>), object sqlReaderQuery = default(object), object sqlReaderStoredProcedureName = default(object), IDictionary<string, StoredProcedureParameter> storedProcedureParameters = default(IDictionary<string, StoredProcedureParameter>), object isolationLevel = default(object), string partitionOption = default(string), SqlPartitionSettings partitionSettings = default(SqlPartitionSettings))
: base(additionalProperties, sourceRetryCount, sourceRetryWait, maxConcurrentConnections, queryTimeout, additionalColumns)
{
SqlReaderQuery = sqlReaderQuery;
SqlReaderStoredProcedureName = sqlReaderStoredProcedureName;
StoredProcedureParameters = storedProcedureParameters;
IsolationLevel = isolationLevel;
+ PartitionOption = partitionOption;
+ PartitionSettings = partitionSettings;
CustomInit();
}
@@ -108,5 +115,20 @@ public SqlSource()
[JsonProperty(PropertyName = "isolationLevel")]
public object IsolationLevel { get; set; }
+ /// <summary>
+ /// Gets or sets the partition mechanism that will be used for Sql read
+ /// in parallel. Possible values include: 'None',
+ /// 'PhysicalPartitionsOfTable', 'DynamicRange'
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionOption")]
+ public string PartitionOption { get; set; }
+
+ /// <summary>
+ /// Gets or sets the settings that will be leveraged for Sql source
+ /// partitioning.
+ /// </summary>
+ [JsonProperty(PropertyName = "partitionSettings")]
+ public SqlPartitionSettings PartitionSettings { get; set; }
+
}
}
diff --git a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
index a1c453f7fb67..b31348b3961b 100644
--- a/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
+++ b/sdk/datafactory/Microsoft.Azure.Management.DataFactory/src/Generated/SdkInfo_DataFactoryManagementClient.cs
@@ -37,16 +37,5 @@ public static IEnumerable<Tuple<string, string, string>> ApiInfo_DataFactoryMana
}.AsEnumerable();
}
}
- // BEGIN: Code Generation Metadata Section
- public static readonly String AutoRestVersion = "v2";
- public static readonly String AutoRestBootStrapperVersion = "autorest@2.0.4413";
- public static readonly String AutoRestCmdExecuted = "cmd.exe /c autorest.cmd https://github.com/Azure/azure-rest-api-specs/blob/master/specification/datafactory/resource-manager/readme.md --csharp --version=v2 --reflect-api-versions --tag=package-2018-06 --csharp-sdks-folder=C:\\Users\\rizh\\Documents\\azure-sdk-for-net\\sdk";
- public static readonly String GithubForkName = "Azure";
- public static readonly String GithubBranchName = "master";
- public static readonly String GithubCommidId = "09ac2b33d780dbf2f74de2083012f62aaa33c451";
- public static readonly String CodeGenerationErrors = "";
- public static readonly String GithubRepoName = "azure-rest-api-specs";
- // END: Code Generation Metadata Section
}
}
-