diff --git a/RawOpsGenerated.swift b/RawOpsGenerated.swift index 5d725a4..deddb9f 100644 --- a/RawOpsGenerated.swift +++ b/RawOpsGenerated.swift @@ -16,8 +16,8 @@ public enum Raw { -static let generatedTensorFlowVersion = "1.12.0" -static let generatedTensorFlowGitVersion = "v1.12.0-0-ga6d8ffae09" +static let generatedTensorFlowVersion = "1.13.1" +static let generatedTensorFlowGitVersion = "v1.12.0-11444-g8b2884c9cb" // @_frozen // SR-9739 public enum A { @@ -71,7 +71,7 @@ public enum DataFormat1 { } // @_frozen // SR-9739 -public enum DataFormat3 { +public enum DataFormat4 { case nchw case nchwVectC case nhwc @@ -123,6 +123,25 @@ public enum Direction { } } +// @_frozen // SR-9739 +public enum Errors { + case ignore + case replace + case strict + + @inlinable + var cName: String { + @inline(__always) + get { + switch self { + case .ignore: return "ignore" + case .replace: return "replace" + case .strict: return "strict" + } + } + } +} + // @_frozen // SR-9739 public enum FinalOp { case div @@ -240,7 +259,7 @@ public enum Method { } // @_frozen // SR-9739 -public enum Method2 { +public enum Method3 { case bilinear @inlinable @@ -274,7 +293,7 @@ public enum Mode { } // @_frozen // SR-9739 -public enum Mode4 { +public enum Mode5 { case reflect case symmetric @@ -291,23 +310,19 @@ public enum Mode4 { } // @_frozen // SR-9739 -public enum OutputStream { - case logError - case logInfo - case logWarning - case stderr - case stdout +public enum OutputEncoding { + case utf-16-be + case utf-32-be + case utf-8 @inlinable var cName: String { @inline(__always) get { switch self { - case .logError: return "log(error)" - case .logInfo: return "log(info)" - case .logWarning: return "log(warning)" - case .stderr: return "stderr" - case .stdout: return "stdout" + case .utf-16-be: return "UTF-16-BE" + case .utf-32-be: return "UTF-32-BE" + case .utf-8: return "UTF-8" } } } @@ -330,6 +345,46 @@ public enum Padding { } } +// @_frozen // SR-9739 +public enum Padding2 { + case explicit + case same + case valid + + @inlinable + var cName: String { + @inline(__always) + get { + switch self { + case .explicit: return "EXPLICIT" + case .same: return "SAME" + case .valid: return "VALID" + } + } + } +} + +// @_frozen // SR-9739 +public enum Reduction { + case max + case min + case prod + case sum + + @inlinable + var cName: String { + @inline(__always) + get { + switch self { + case .max: return "max" + case .min: return "min" + case .prod: return "prod" + case .sum: return "sum" + } + } + } +} + // @_frozen // SR-9739 public enum RnnMode { case gru @@ -353,6 +408,23 @@ public enum RnnMode { // @_frozen // SR-9739 public enum RoundMode { + case halfToEven + case halfUp + + @inlinable + var cName: String { + @inline(__always) + get { + switch self { + case .halfToEven: return "HALF_TO_EVEN" + case .halfUp: return "HALF_UP" + } + } + } +} + +// @_frozen // SR-9739 +public enum RoundMode6 { case halfAwayFromZero case halfToEven @@ -425,90 +497,6 @@ public static func abs( return Tensor(handle: ret) } -/// Applies a gradient to a given accumulator. -/// -/// Does not add if local_step is lesser than the accumulator's global_step. -/// -/// - Parameters: -/// - handle: The handle to a accumulator. -/// - local_step: The local_step value at which the gradient was computed. -/// - gradient: A tensor of the gradient to be accumulated. -/// -/// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type -/// of the accumulator. 
-@inlinable @inline(__always) -public static func accumulatorApplyGradient( - handle: StringTensor, - localStep: Tensor, - gradient: Tensor -) { - return #tfop("AccumulatorApplyGradient", - handle, - localStep, - gradient, - dtype$dtype: Dtype.tensorFlowDataType) -} - -/// Returns the number of gradients aggregated in the given accumulators. -/// -/// - Parameter handle: The handle to an accumulator. -/// -/// - Output num_accumulated: The number of gradients aggregated in the given accumulator. -@inlinable @inline(__always) -public static func accumulatorNumAccumulated( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("AccumulatorNumAccumulated", - handle) - return Tensor(handle: ret) -} - -/// Updates the accumulator with a new value for global_step. -/// -/// Logs warning if the accumulator's value is already higher than -/// new_global_step. -/// -/// - Parameters: -/// - handle: The handle to an accumulator. -/// - new_global_step: The new global_step value to set. -@inlinable @inline(__always) -public static func accumulatorSetGlobalStep( - handle: StringTensor, - newGlobalStep: Tensor -) { - return #tfop("AccumulatorSetGlobalStep", - handle, - newGlobalStep) -} - -/// Extracts the average gradient in the given ConditionalAccumulator. -/// -/// The op blocks until sufficient (i.e., more than num_required) -/// gradients have been accumulated. If the accumulator has already -/// aggregated more than num_required gradients, it returns the average of -/// the accumulated gradients. Also automatically increments the recorded -/// global_step in the accumulator by 1, and resets the aggregate to 0. -/// -/// - Parameters: -/// - handle: The handle to an accumulator. -/// - num_required: Number of gradients required before we return an aggregate. -/// -/// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type -/// of the accumulator. -/// -/// - Output average: The average of the accumulated gradients. -@inlinable @inline(__always) -public static func accumulatorTakeGradient( - handle: StringTensor, - numRequired: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("AccumulatorTakeGradient", - handle, - numRequired, - dtype$dtype: Dtype.tensorFlowDataType) - return Tensor(handle: ret) -} - /// Computes acos of x element-wise. @inlinable @inline(__always) public static func acos( @@ -715,13 +703,14 @@ public static func adjustContrast( /// /// - Output output: The contrast-adjusted image or images. @inlinable @inline(__always) -public static func adjustContrastv2( - images: Tensor, +public static func adjustContrastv2( + images: Tensor, contrastFactor: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("AdjustContrastv2", +) -> Tensor { + let ret: TensorHandle = #tfop("AdjustContrastv2", images, - contrastFactor) + contrastFactor, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } @@ -740,13 +729,14 @@ public static func adjustContrastv2( /// /// - Output output: The hue-adjusted image or images. @inlinable @inline(__always) -public static func adjustHue( - images: Tensor, +public static func adjustHue( + images: Tensor, delta: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("AdjustHue", +) -> Tensor { + let ret: TensorHandle = #tfop("AdjustHue", images, - delta) + delta, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } @@ -765,13 +755,14 @@ public static func adjustHue( /// /// - Output output: The hue-adjusted image or images. 
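+///
+/// A minimal usage sketch; `rgbImage` stands in for a hypothetical `Tensor<Float>` RGB image
+/// (last dimension of size 3) and the scale values are illustrative:
+///
+/// ```swift
+/// import TensorFlow
+/// let moreSaturated = Raw.adjustSaturation(images: rgbImage, scale: Tensor<Float>(2.0))
+/// let desaturated = Raw.adjustSaturation(images: rgbImage, scale: Tensor<Float>(0.5))
+/// ```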
@inlinable @inline(__always) -public static func adjustSaturation( - images: Tensor, +public static func adjustSaturation( + images: Tensor, scale: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("AdjustSaturation", +) -> Tensor { + let ret: TensorHandle = #tfop("AdjustSaturation", images, - scale) + scale, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } @@ -859,6 +850,57 @@ public static func allCandidateSampler( return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } +/// An Op to exchange data across TPU replicas. +/// +/// On each replica, the input is split into `split_count` blocks along +/// `split_dimension` and send to the other replicas given group_assignment. After +/// receiving `split_count` - 1 blocks from other replicas, we concatenate the +/// blocks along `concat_dimension` as the output. +/// +/// For example, suppose there are 2 TPU replicas: +/// replica 0 receives input: `[[A, B]]` +/// replica 1 receives input: `[[C, D]]` +/// +/// group_assignment=`[[0, 1]]` +/// concat_dimension=0 +/// split_dimension=1 +/// split_count=2 +/// +/// replica 0's output: `[[A], [C]]` +/// replica 1's output: `[[B], [D]]` +/// +/// - Parameters: +/// - input: The local input to the sum. +/// - group_assignment: An int32 tensor with shape +/// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the +/// replica ids in the ith subgroup. +/// +/// - Attrs: +/// - T: The type of elements to be exchanged. +/// - concat_dimension: The dimension number to concatenate. +/// - split_dimension: The dimension number to split. +/// - split_count: The number of splits, this number must equal to the sub-group +/// size(group_assignment.get_shape()[1]) +/// +/// - Output output: The exchanged result. +@inlinable @inline(__always) +public static func allToAll( + _ input: Tensor, + groupAssignment: Tensor, + concatDimension: Int64, + splitDimension: Int64, + splitCount: Int64 +) -> Tensor { + let ret: TensorHandle = #tfop("AllToAll", + input, + groupAssignment, + T$dtype: T.tensorFlowDataType, + concat_dimension: concatDimension, + split_dimension: splitDimension, + split_count: splitCount) + return Tensor(handle: ret) +} + /// Returns the argument of a complex number. /// /// Given a tensor `input` of complex numbers, this operation returns a tensor of @@ -918,1020 +960,299 @@ public static func any( return Tensor(handle: ret) } -/// Update '*var' according to the AdaMax algorithm. -/// -/// m_t <- beta1 * m_{t-1} + (1 - beta1) * g -/// v_t <- max(beta2 * v_{t-1}, abs(g)) -/// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon) -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - m: Should be from a Variable(). -/// - v: Should be from a Variable(). -/// - beta1_power: Must be a scalar. -/// - lr: Scaling factor. Must be a scalar. -/// - beta1: Momentum factor. Must be a scalar. -/// - beta2: Momentum factor. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. -/// -/// - Attr use_locking: If `True`, updating of the var, m, and v tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". +/// Returns the truth value of abs(x-y) < tolerance element-wise. 
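+///
+/// A minimal usage sketch (the tensor values are illustrative):
+///
+/// ```swift
+/// import TensorFlow
+/// let x = Tensor<Float>([1.0, 2.0, 3.0])
+/// let y = Tensor<Float>([1.0001, 2.5, 3.0])
+/// let close = Raw.approximateEqual(x, y, tolerance: 1e-3)
+/// // close == [true, false, true]
+/// ```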
@inlinable @inline(__always) -public static func applyAdaMax( - var_: Tensor, - m: Tensor, - v: Tensor, - beta1Power: Tensor, - lr: Tensor, - beta1: Tensor, - beta2: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAdaMax", - var_, - m, - v, - beta1Power, - lr, - beta1, - beta2, - epsilon, - grad, +public static func approximateEqual( + _ x: Tensor, + _ y: Tensor, + tolerance: Double = 1e-05 +) -> Tensor { + let ret: TensorHandle = #tfop("ApproximateEqual", + x, + y, T$dtype: T.tensorFlowDataType, - use_locking: useLocking) + tolerance: tolerance) return Tensor(handle: ret) } -/// Update '*var' according to the adadelta scheme. -/// -/// accum = rho() * accum + (1 - rho()) * grad.square(); -/// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; -/// update_accum = rho() * update_accum + (1 - rho()) * update.square(); -/// var -= update; +/// Returns the index with the largest value across dimensions of a tensor. /// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - accum_update: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - rho: Decay factor. Must be a scalar. -/// - epsilon: Constant factor. Must be a scalar. -/// - grad: The gradient. +/// Note that in case of ties the identity of the return value is not guaranteed. /// -/// - Attr use_locking: If True, updating of the var, accum and update_accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. +/// Usage: +/// ```python +/// import tensorflow as tf +/// a = [1, 10, 26.9, 2.8, 166.32, 62.3] +/// b = tf.math.argmax(input = a) +/// c = tf.keras.backend.eval(b) +/// # c = 4 +/// # here a[4] = 166.32 which is the largest element of a across axis 0 +/// ``` /// -/// - Output out: Same as "var". +/// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. +/// Describes which dimension of the input Tensor to reduce across. For vectors, +/// use dimension = 0. @inlinable @inline(__always) -public static func applyAdadelta( - var_: Tensor, - accum: Tensor, - accumUpdate: Tensor, - lr: Tensor, - rho: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAdadelta", - var_, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, +public static func argMax( + _ input: Tensor, + dimension: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("ArgMax", + input, + dimension, T$dtype: T.tensorFlowDataType, - use_locking: useLocking) + Tidx$dtype: Tidx.tensorFlowDataType, + output_type$dtype: OutputType.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the adagrad scheme. -/// -/// accum += grad * grad -/// var -= lr * grad * (1 / sqrt(accum)) +/// Returns the index with the smallest value across dimensions of a tensor. /// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - grad: The gradient. +/// Note that in case of ties the identity of the return value is not guaranteed. /// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. 
+/// Usage: +/// ```python +/// import tensorflow as tf +/// a = [1, 10, 26.9, 2.8, 166.32, 62.3] +/// b = tf.math.argmin(input = a) +/// c = tf.keras.backend.eval(b) +/// # c = 0 +/// # here a[0] = 1 which is the smallest element of a across axis 0 +/// ``` /// -/// - Output out: Same as "var". +/// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. +/// Describes which dimension of the input Tensor to reduce across. For vectors, +/// use dimension = 0. @inlinable @inline(__always) -public static func applyAdagrad( - var_: Tensor, - accum: Tensor, - lr: Tensor, - grad: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAdagrad", - var_, - accum, - lr, - grad, +public static func argMin( + _ input: Tensor, + dimension: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("ArgMin", + input, + dimension, T$dtype: T.tensorFlowDataType, - use_locking: useLocking, - update_slots: updateSlots) + Tidx$dtype: Tidx.tensorFlowDataType, + output_type$dtype: OutputType.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the proximal adagrad scheme. -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - gradient_accumulator: Should be from a Variable(). -/// - gradient_squared_accumulator: Should be from a Variable(). -/// - grad: The gradient. -/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - global_step: Training step number. Must be a scalar. +/// Converts each entry in the given tensor to strings. Supports many numeric /// -/// - Attr use_locking: If True, updating of the var and accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. +/// types and boolean. /// -/// - Output out: Same as "var". +/// - Attrs: +/// - precision: The post-decimal precision to use for floating point numbers. +/// Only used if precision > -1. +/// - scientific: Use scientific notation for floating point numbers. +/// - shortest: Use shortest representation (either scientific or standard) for +/// floating point numbers. +/// - width: Pad pre-decimal numbers to this width. +/// Applies to both floating point and integer numbers. +/// Only used if width > -1. +/// - fill: The value to pad if width > -1. If empty, pads with spaces. +/// Another typical value is '0'. String cannot be longer than 1 character. @inlinable @inline(__always) -public static func applyAdagradDA( - var_: Tensor, - gradientAccumulator: Tensor, - gradientSquaredAccumulator: Tensor, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - globalStep: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAdagradDA", - var_, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - lr, - l1, - l2, - globalStep, +public static func asString( + _ input: Tensor, + precision: Int64 = -1, + scientific: Bool = false, + shortest: Bool = false, + width: Int64 = -1, + fill: String +) -> StringTensor { + let ret: TensorHandle = #tfop("AsString", + input, T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) + precision: precision, + scientific: scientific, + shortest: shortest, + width: width, + fill: fill) + return StringTensor(handle: ret) } -/// Update '*var' according to the Adam algorithm. +/// Computes the trignometric inverse sine of x element-wise. 
/// -/// $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ -/// $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ -/// $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ -/// $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ +/// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that +/// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. /// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - m: Should be from a Variable(). -/// - v: Should be from a Variable(). -/// - beta1_power: Must be a scalar. -/// - beta2_power: Must be a scalar. -/// - lr: Scaling factor. Must be a scalar. -/// - beta1: Momentum factor. Must be a scalar. -/// - beta2: Momentum factor. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. +/// **Note**: The output of `tf.math.asin` will lie within the invertible range +/// of sine, i.e [-pi/2, pi/2]. /// -/// - Attrs: -/// - use_locking: If `True`, updating of the var, m, and v tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// - use_nesterov: If `True`, uses the nesterov update. +/// For example: +/// +/// ```python +/// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] +/// x = tf.constant([1.047, 0.785]) +/// y = tf.math.sin(x) # [0.8659266, 0.7068252] +/// +/// tf.math.asin(y) # [1.047, 0.785] = x +/// ``` /// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyAdam( - var_: Tensor, - m: Tensor, - v: Tensor, - beta1Power: Tensor, - beta2Power: Tensor, - lr: Tensor, - beta1: Tensor, - beta2: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false +public static func asin( + _ x: Tensor ) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAdam", - var_, - m, - v, - beta1Power, - beta2Power, - lr, - beta1, - beta2, - epsilon, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking, - use_nesterov: useNesterov) + let ret: TensorHandle = #tfop("Asin", + x, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the AddSign update. -/// -/// m_t <- beta1 * m_{t-1} + (1 - beta1) * g -/// update <- (alpha + sign_decay * sign(g) *sign(m)) * g -/// variable <- variable - lr_t * update -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - m: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - alpha: Must be a scalar. -/// - sign_decay: Must be a scalar. -/// - beta: Must be a scalar. -/// - grad: The gradient. -/// -/// - Attr use_locking: If `True`, updating of the var and m tensors is -/// protected by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". +/// Computes inverse hyperbolic sine of x element-wise. @inlinable @inline(__always) -public static func applyAddSign( - var_: Tensor, - m: Tensor, - lr: Tensor, - alpha: Tensor, - signDecay: Tensor, - beta: Tensor, - grad: Tensor, - useLocking: Bool = false +public static func asinh( + _ x: Tensor ) -> Tensor { - let ret: TensorHandle = #tfop("ApplyAddSign", - var_, - m, - lr, - alpha, - signDecay, - beta, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) + let ret: TensorHandle = #tfop("Asinh", + x, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the centered RMSProp algorithm. 
-/// -/// The centered RMSProp algorithm uses an estimate of the centered second moment -/// (i.e., the variance) for normalization, as opposed to regular RMSProp, which -/// uses the (uncentered) second moment. This often helps with training, but is -/// slightly more expensive in terms of computation and memory. -/// -/// Note that in dense implementation of this algorithm, mg, ms, and mom will -/// update even if the grad is zero, but in this sparse implementation, mg, ms, -/// and mom will not update in iterations during which the grad is zero. -/// -/// mean_square = decay * mean_square + (1-decay) * gradient ** 2 -/// mean_grad = decay * mean_grad + (1-decay) * gradient -/// -/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) +/// Asserts that the given condition is true. /// -/// mg <- rho * mg_{t-1} + (1-rho) * grad -/// ms <- rho * ms_{t-1} + (1-rho) * grad * grad -/// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) -/// var <- var - mom +/// If `condition` evaluates to false, print the list of tensors in `data`. +/// `summarize` determines how many entries of the tensors to print. /// /// - Parameters: -/// - var: Should be from a Variable(). -/// - mg: Should be from a Variable(). -/// - ms: Should be from a Variable(). -/// - mom: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - rho: Decay rate. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. -/// -/// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is -/// protected by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. +/// - condition: The condition to evaluate. +/// - data: The tensors to print out when condition is false. /// -/// - Output out: Same as "var". +/// - Attr summarize: Print this many entries of each tensor. @inlinable @inline(__always) -public static func applyCenteredRMSProp( - var_: Tensor, - mg: Tensor, - ms: Tensor, - mom: Tensor, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyCenteredRMSProp", - var_, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func assert( + condition: Tensor, + data: [Tensor], + summarize: Int64 = 3 +) { + return #tfop("Assert", + condition, + data, + summarize: summarize) } -/// Update '*var' according to the Ftrl-proximal scheme. +/// Computes the trignometric inverse tangent of x element-wise. /// -/// accum_new = accum + grad * grad -/// linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -/// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -/// accum = accum_new +/// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that +/// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. /// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - linear: Should be from a Variable(). -/// - grad: The gradient. -/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regulariation. Must be a scalar. -/// - l2: L2 regulariation. Must be a scalar. -/// - lr_power: Scaling factor. Must be a scalar. 
+/// **Note**: The output of `tf.math.atan` will lie within the invertible range +/// of tan, i.e (-pi/2, pi/2). +/// +/// For example: +/// +/// ```python +/// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] +/// x = tf.constant([1.047, 0.785]) +/// y = tf.math.tan(x) # [1.731261, 0.99920404] /// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. +/// tf.math.atan(y) # [1.047, 0.785] = x +/// ``` /// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyFtrl( - var_: Tensor, - accum: Tensor, - linear: Tensor, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - lrPower: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyFtrl", - var_, - accum, - linear, - grad, - lr, - l1, - l2, - lrPower, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) +public static func atan( + _ x: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("Atan", + x, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the Ftrl-proximal scheme. -/// -/// grad_with_shrinkage = grad + 2 * l2_shrinkage * var -/// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage -/// linear += grad_with_shrinkage + -/// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -/// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -/// accum = accum_new -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - linear: Should be from a Variable(). -/// - grad: The gradient. -/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regulariation. Must be a scalar. -/// - l2: L2 shrinkage regulariation. Must be a scalar. -/// - lr_power: Scaling factor. Must be a scalar. -/// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. +/// Computes arctangent of `y/x` element-wise, respecting signs of the arguments. /// -/// - Output out: Same as "var". +/// This is the angle \( \theta \in [-\pi, \pi] \) such that +/// \[ x = r \cos(\theta) \] +/// and +/// \[ y = r \sin(\theta) \] +/// where \(r = \sqrt(x^2 + y^2) \). @inlinable @inline(__always) -public static func applyFtrlV2( - var_: Tensor, - accum: Tensor, - linear: Tensor, - grad: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - l2Shrinkage: Tensor, - lrPower: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyFtrlV2", - var_, - accum, - linear, - grad, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) +public static func atan2( + _ y: Tensor, + _ x: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("Atan2", + y, + x, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' by subtracting 'alpha' * 'delta' from it. -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - alpha: Scaling factor. Must be a scalar. -/// - delta: The change. -/// -/// - Attr use_locking: If `True`, the subtraction will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". +/// Computes inverse hyperbolic tangent of x element-wise. 
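+///
+/// A minimal usage sketch (the input values are illustrative; atanh(±0.5) ≈ ±0.5493):
+///
+/// ```swift
+/// import TensorFlow
+/// let x = Tensor<Float>([0.0, 0.5, -0.5])
+/// let y = Raw.atanh(x)
+/// // y ≈ [0.0, 0.5493, -0.5493]
+/// ```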
@inlinable @inline(__always) -public static func applyGradientDescent( - var_: Tensor, - alpha: Tensor, - delta: Tensor, - useLocking: Bool = false +public static func atanh( + _ x: Tensor ) -> Tensor { - let ret: TensorHandle = #tfop("ApplyGradientDescent", - var_, - alpha, - delta, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) + let ret: TensorHandle = #tfop("Atanh", + x, + T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } -/// Update '*var' according to the momentum scheme. Set use_nesterov = True if you -/// -/// want to use Nesterov momentum. -/// -/// accum = accum * momentum + grad -/// var -= lr * accum -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - grad: The gradient. -/// - momentum: Momentum. Must be a scalar. -/// -/// - Attrs: -/// - use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// - use_nesterov: If `True`, the tensor passed to compute grad will be -/// var - lr * momentum * accum, so in the end, the var you get is actually -/// var - lr * momentum * accum. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func applyMomentum( - var_: Tensor, - accum: Tensor, - lr: Tensor, - grad: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyMomentum", - var_, - accum, - lr, - grad, - momentum, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking, - use_nesterov: useNesterov) - return Tensor(handle: ret) +@inlinable @inline(__always) +public static func attr( + _ a: Int64 +) { + return #tfop("Attr", + a: a) } -/// Update '*var' according to the AddSign update. -/// -/// m_t <- beta1 * m_{t-1} + (1 - beta1) * g -/// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g -/// variable <- variable - lr_t * update -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - m: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - logbase: Must be a scalar. -/// - sign_decay: Must be a scalar. -/// - beta: Must be a scalar. -/// - grad: The gradient. -/// -/// - Attr use_locking: If `True`, updating of the var and m tensors is -/// protected by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyPowerSign( - var_: Tensor, - m: Tensor, - lr: Tensor, - logbase: Tensor, - signDecay: Tensor, - beta: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyPowerSign", - var_, - m, - lr, - logbase, - signDecay, - beta, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func attrBool( + _ a: Bool +) { + return #tfop("AttrBool", + a: a) } -/// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. -/// -/// accum += grad * grad -/// prox_v = var - lr * grad * (1 / sqrt(accum)) -/// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0} -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - grad: The gradient. 
-/// -/// - Attr use_locking: If True, updating of the var and accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyProximalAdagrad( - var_: Tensor, - accum: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyProximalAdagrad", - var_, - accum, - lr, - l1, - l2, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func attrBoolList( + _ a: [Bool] +) { + return #tfop("AttrBoolList", + a: a) } -/// Update '*var' as FOBOS algorithm with fixed learning rate. -/// -/// prox_v = var - alpha * delta -/// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0} -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - alpha: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - delta: The change. -/// -/// - Attr use_locking: If True, the subtraction will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyProximalGradientDescent( - var_: Tensor, - alpha: Tensor, - l1: Tensor, - l2: Tensor, - delta: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyProximalGradientDescent", - var_, - alpha, - l1, - l2, - delta, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func attrDefault( + _ a: String = "b'banana'" +) { + return #tfop("AttrDefault", + a: a) } -/// Update '*var' according to the RMSProp algorithm. -/// -/// Note that in dense implementation of this algorithm, ms and mom will -/// update even if the grad is zero, but in this sparse implementation, ms -/// and mom will not update in iterations during which the grad is zero. -/// -/// mean_square = decay * mean_square + (1-decay) * gradient ** 2 -/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) -/// -/// ms <- rho * ms_{t-1} + (1-rho) * grad * grad -/// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) -/// var <- var - mom -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - ms: Should be from a Variable(). -/// - mom: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - rho: Decay rate. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. -/// -/// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". @inlinable @inline(__always) -public static func applyRMSProp( - var_: Tensor, - ms: Tensor, - mom: Tensor, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ApplyRMSProp", - var_, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func attrEmptyListDefault( + _ a: [Double] +) { + return #tfop("AttrEmptyListDefault", + a: a) } -/// Returns the truth value of abs(x-y) < tolerance element-wise. 
@inlinable @inline(__always) -public static func approximateEqual( - _ x: Tensor, - _ y: Tensor, - tolerance: Double = 1e-05 -) -> Tensor { - let ret: TensorHandle = #tfop("ApproximateEqual", - x, - y, - T$dtype: T.tensorFlowDataType, - tolerance: tolerance) - return Tensor(handle: ret) +public static func attrEnum( + _ a: A +) { + return #tfop("AttrEnum", + a: a.cName) } -/// Returns the index with the largest value across dimensions of a tensor. -/// -/// Note that in case of ties the identity of the return value is not guaranteed. -/// -/// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. -/// Describes which dimension of the input Tensor to reduce across. For vectors, -/// use dimension = 0. @inlinable @inline(__always) -public static func argMax( - _ input: Tensor, - dimension: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("ArgMax", - input, - dimension, - T$dtype: T.tensorFlowDataType, - Tidx$dtype: Tidx.tensorFlowDataType, - output_type$dtype: OutputType.tensorFlowDataType) - return Tensor(handle: ret) +public static func attrEnumList( + _ a: [String] +) { + return #tfop("AttrEnumList", + a: a) } -/// Returns the index with the smallest value across dimensions of a tensor. -/// -/// Note that in case of ties the identity of the return value is not guaranteed. -/// -/// - Parameter dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`. -/// Describes which dimension of the input Tensor to reduce across. For vectors, -/// use dimension = 0. @inlinable @inline(__always) -public static func argMin( - _ input: Tensor, - dimension: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("ArgMin", - input, - dimension, - T$dtype: T.tensorFlowDataType, - Tidx$dtype: Tidx.tensorFlowDataType, - output_type$dtype: OutputType.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Converts each entry in the given tensor to strings. Supports many numeric -/// -/// types and boolean. -/// -/// - Attrs: -/// - precision: The post-decimal precision to use for floating point numbers. -/// Only used if precision > -1. -/// - scientific: Use scientific notation for floating point numbers. -/// - shortest: Use shortest representation (either scientific or standard) for -/// floating point numbers. -/// - width: Pad pre-decimal numbers to this width. -/// Applies to both floating point and integer numbers. -/// Only used if width > -1. -/// - fill: The value to pad if width > -1. If empty, pads with spaces. -/// Another typical value is '0'. String cannot be longer than 1 character. -@inlinable @inline(__always) -public static func asString( - _ input: Tensor, - precision: Int64 = -1, - scientific: Bool = false, - shortest: Bool = false, - width: Int64 = -1, - fill: String -) -> StringTensor { - let ret: TensorHandle = #tfop("AsString", - input, - T$dtype: T.tensorFlowDataType, - precision: precision, - scientific: scientific, - shortest: shortest, - width: width, - fill: fill) - return StringTensor(handle: ret) -} - -/// Computes asin of x element-wise. -@inlinable @inline(__always) -public static func asin( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Asin", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Computes inverse hyperbolic sine of x element-wise. 
-@inlinable @inline(__always) -public static func asinh( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Asinh", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Asserts that the given condition is true. -/// -/// If `condition` evaluates to false, print the list of tensors in `data`. -/// `summarize` determines how many entries of the tensors to print. -/// -/// - Parameters: -/// - condition: The condition to evaluate. -/// - data: The tensors to print out when condition is false. -/// -/// - Attr summarize: Print this many entries of each tensor. -@inlinable @inline(__always) -public static func assert( - condition: Tensor, - data: [Tensor], - summarize: Int64 = 3 -) { - return #tfop("Assert", - condition, - data, - summarize: summarize) -} - -/// Update 'ref' by assigning 'value' to it. -/// -/// This operation outputs "ref" after the assignment is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. May be uninitialized. -/// - value: The value to be assigned to the variable. -/// -/// - Attrs: -/// - validate_shape: If true, the operation will validate that the shape -/// of 'value' matches the shape of the Tensor being assigned to. If false, -/// 'ref' will take on the shape of 'value'. -/// - use_locking: If True, the assignment will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as "ref". Returned as a convenience for operations that want -/// to use the new value after the variable has been reset. -@inlinable @inline(__always) -public static func assign( - ref: Tensor, - value: Tensor, - validateShape: Bool = true, - useLocking: Bool = true -) -> Tensor { - let ret: TensorHandle = #tfop("Assign", - ref, - value, - T$dtype: T.tensorFlowDataType, - validate_shape: validateShape, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update 'ref' by adding 'value' to it. -/// -/// This operation outputs "ref" after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - value: The value to be added to the variable. -/// -/// - Attr use_locking: If True, the addition will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as "ref". Returned as a convenience for operations that want -/// to use the new value after the variable has been updated. -@inlinable @inline(__always) -public static func assignAdd( - ref: Tensor, - value: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("AssignAdd", - ref, - value, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update 'ref' by subtracting 'value' from it. -/// -/// This operation outputs "ref" after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - value: The value to be subtracted to the variable. -/// -/// - Attr use_locking: If True, the subtraction will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as "ref". 
Returned as a convenience for operations that want -/// to use the new value after the variable has been updated. -@inlinable @inline(__always) -public static func assignSub( - ref: Tensor, - value: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("AssignSub", - ref, - value, - T$dtype: T.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Computes atan of x element-wise. -@inlinable @inline(__always) -public static func atan( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Atan", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Computes arctangent of `y/x` element-wise, respecting signs of the arguments. -/// -/// This is the angle \( \theta \in [-\pi, \pi] \) such that -/// \[ x = r \cos(\theta) \] -/// and -/// \[ y = r \sin(\theta) \] -/// where \(r = \sqrt(x^2 + y^2) \). -@inlinable @inline(__always) -public static func atan2( - _ y: Tensor, - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Atan2", - y, - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Computes inverse hyperbolic tangent of x element-wise. -@inlinable @inline(__always) -public static func atanh( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Atanh", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func attr( - _ a: Int64 -) { - return #tfop("Attr", - a: a) -} - -@inlinable @inline(__always) -public static func attrBool( - _ a: Bool -) { - return #tfop("AttrBool", - a: a) -} - -@inlinable @inline(__always) -public static func attrBoolList( - _ a: [Bool] -) { - return #tfop("AttrBoolList", - a: a) -} - -@inlinable @inline(__always) -public static func attrDefault( - _ a: String = "banana" -) { - return #tfop("AttrDefault", - a: a) -} - -@inlinable @inline(__always) -public static func attrEmptyListDefault( - _ a: [Double] -) { - return #tfop("AttrEmptyListDefault", - a: a) -} - -@inlinable @inline(__always) -public static func attrEnum( - _ a: A -) { - return #tfop("AttrEnum", - a: a.cName) -} - -@inlinable @inline(__always) -public static func attrEnumList( - _ a: [String] -) { - return #tfop("AttrEnumList", - a: a) -} - -@inlinable @inline(__always) -public static func attrFloat( - _ a: Double -) { - return #tfop("AttrFloat", - a: a) +public static func attrFloat( + _ a: Double +) { + return #tfop("AttrFloat", + a: a) } @inlinable @inline(__always) @@ -2130,7 +1451,7 @@ public static func avgPool( ksize: [Int32], strides: [Int32], padding: Padding, - dataFormat: DataFormat = .nhwc + dataFormat: DataFormat = .b'nhwc' ) -> Tensor { let ret: TensorHandle = #tfop("AvgPool", value, @@ -2165,7 +1486,7 @@ public static func avgPool3D( ksize: [Int32], strides: [Int32], padding: Padding, - dataFormat: DataFormat1 = .ndhwc + dataFormat: DataFormat1 = .b'ndhwc' ) -> Tensor { let ret: TensorHandle = #tfop("AvgPool3D", input, @@ -2203,7 +1524,7 @@ public static func avgPool3DGrad( ksize: [Int32], strides: [Int32], padding: Padding, - dataFormat: DataFormat1 = .ndhwc + dataFormat: DataFormat1 = .b'ndhwc' ) -> Tensor { let ret: TensorHandle = #tfop("AvgPool3DGrad", origInputShape, @@ -2241,7 +1562,7 @@ public static func avgPoolGrad( ksize: [Int32], strides: [Int32], padding: Padding, - dataFormat: DataFormat = .nhwc + dataFormat: DataFormat = .b'nhwc' ) -> Tensor { let ret: TensorHandle = #tfop("AvgPoolGrad", origInputShape, @@ -2261,89 +1582,6 @@ public static func b( return Tensor(handle: ret) } -/// 
Closes the given barrier. -/// -/// This operation signals that no more new elements will be inserted in the -/// given barrier. Subsequent InsertMany that try to introduce a new key will fail. -/// Subsequent InsertMany operations that just add missing components to already -/// existing elements will continue to succeed. Subsequent TakeMany operations will -/// continue to succeed if sufficient completed elements remain in the barrier. -/// Subsequent TakeMany operations that would block will fail immediately. -/// -/// - Parameter handle: The handle to a barrier. -/// -/// - Attr cancel_pending_enqueues: If true, all pending enqueue requests that are -/// blocked on the barrier's queue will be canceled. InsertMany will fail, even -/// if no new key is introduced. -@inlinable @inline(__always) -public static func barrierClose( - handle: StringTensor, - cancelPendingEnqueues: Bool = false -) { - return #tfop("BarrierClose", - handle, - cancel_pending_enqueues: cancelPendingEnqueues) -} - -/// Computes the number of incomplete elements in the given barrier. -/// -/// - Parameter handle: The handle to a barrier. -/// -/// - Output size: The number of incomplete elements (i.e. those with some of their value -/// components not set) in the barrier. -@inlinable @inline(__always) -public static func barrierIncompleteSize( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("BarrierIncompleteSize", - handle) - return Tensor(handle: ret) -} - -/// For each key, assigns the respective value to the specified component. -/// -/// If a key is not found in the barrier, this operation will create a new -/// incomplete element. If a key is found in the barrier, and the element -/// already has a value at component_index, this operation will fail with -/// INVALID_ARGUMENT, and leave the barrier in an undefined state. -/// -/// - Parameters: -/// - handle: The handle to a barrier. -/// - keys: A one-dimensional tensor of keys, with length n. -/// - values: An any-dimensional tensor of values, which are associated with the -/// respective keys. The 0th dimension must have length n. -/// -/// - Attr component_index: The component of the barrier elements that is being assigned. -@inlinable @inline(__always) -public static func barrierInsertMany( - handle: StringTensor, - keys: StringTensor, - _ values: Tensor, - componentIndex: Int64 -) { - return #tfop("BarrierInsertMany", - handle, - keys, - values, - T$dtype: T.tensorFlowDataType, - component_index: componentIndex) -} - -/// Computes the number of complete elements in the given barrier. -/// -/// - Parameter handle: The handle to a barrier. -/// -/// - Output size: The number of complete elements (i.e. those with all of their value -/// components set) in the barrier. 
-@inlinable @inline(__always) -public static func barrierReadySize( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("BarrierReadySize", - handle) - return Tensor(handle: ret) -} - @inlinable @inline(__always) public static func batchCholesky( _ input: Tensor @@ -2412,25 +1650,76 @@ public static func batchMatMul( return Tensor(handle: ret) } -@inlinable @inline(__always) -public static func batchMatrixBandPart( - _ input: Tensor, - numLower: Tensor, - numUpper: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("BatchMatrixBandPart", - input, - numLower, - numUpper, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func batchMatrixDeterminant( - _ input: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("BatchMatrixDeterminant", +/// Multiplies slices of two tensors in batches. +/// +/// Multiplies all slices of `Tensor` `x` and `y` (each slice can be +/// viewed as an element of a batch), and arranges the individual results +/// in a single output tensor of the same batch size. Each of the +/// individual slices can optionally be adjointed (to adjoint a matrix +/// means to transpose and conjugate it) before multiplication by setting +/// the `adj_x` or `adj_y` flag to `True`, which are by default `False`. +/// +/// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` +/// and `[..., r_y, c_y]`. +/// +/// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: +/// +/// r_o = c_x if adj_x else r_x +/// c_o = r_y if adj_y else c_y +/// +/// It is computed as: +/// +/// output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) +/// +/// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More +/// about broadcasting +/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). +/// +/// +/// - Parameters: +/// - x: 2-D or higher with shape `[..., r_x, c_x]`. +/// - y: 2-D or higher with shape `[..., r_y, c_y]`. +/// +/// - Attrs: +/// - adj_x: If `True`, adjoint the slices of `x`. Defaults to `False`. +/// - adj_y: If `True`, adjoint the slices of `y`. Defaults to `False`. 
+/// +/// - Output output: 3-D or higher with shape `[..., r_o, c_o]` +@inlinable @inline(__always) +public static func batchMatMulV2( + _ x: Tensor, + _ y: Tensor, + adjX: Bool = false, + adjY: Bool = false +) -> Tensor { + let ret: TensorHandle = #tfop("BatchMatMulV2", + x, + y, + T$dtype: T.tensorFlowDataType, + adj_x: adjX, + adj_y: adjY) + return Tensor(handle: ret) +} + +@inlinable @inline(__always) +public static func batchMatrixBandPart( + _ input: Tensor, + numLower: Tensor, + numUpper: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("BatchMatrixBandPart", + input, + numLower, + numUpper, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + +@inlinable @inline(__always) +public static func batchMatrixDeterminant( + _ input: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("BatchMatrixDeterminant", input, T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) @@ -2980,7 +2269,7 @@ public static func betainc( public static func biasAdd( value: Tensor, bias: Tensor, - dataFormat: DataFormat = .nhwc + dataFormat: DataFormat = .b'nhwc' ) -> Tensor { let ret: TensorHandle = #tfop("BiasAdd", value, @@ -3010,7 +2299,7 @@ public static func biasAdd( @inlinable @inline(__always) public static func biasAddGrad( outBackprop: Tensor, - dataFormat: DataFormat = .nhwc + dataFormat: DataFormat = .b'nhwc' ) -> Tensor { let ret: TensorHandle = #tfop("BiasAddGrad", outBackprop, @@ -3043,46 +2332,6 @@ public static func biasAddV1( return Tensor(handle: ret) } -/// A Reader that outputs rows from a BigQuery table as tensorflow Examples. -/// -/// - Attrs: -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. -/// - project_id: GCP project ID. -/// - dataset_id: BigQuery Dataset ID. -/// - table_id: Table to read. -/// - columns: List of columns to read. Leave empty to read all columns. -/// - timestamp_millis: Table snapshot timestamp in millis since epoch. Relative -/// (negative or zero) snapshot times are not allowed. For more details, see -/// 'Table Decorators' in BigQuery docs. -/// - test_end_point: Do not use. For testing purposes only. -/// -/// - Output reader_handle: The handle to reference the Reader. -@inlinable @inline(__always) -public static func bigQueryReader( - container: String, - sharedName: String, - projectId: String, - datasetId: String, - tableId: String, - columns: [String], - timestampMillis: Int64, - testEndPoint: String -) -> StringTensor { - let ret: TensorHandle = #tfop("BigQueryReader", - container: container, - shared_name: sharedName, - project_id: projectId, - dataset_id: datasetId, - table_id: tableId, - columns: columns, - timestamp_millis: timestampMillis, - test_end_point: testEndPoint) - return StringTensor(handle: ret) -} - @inlinable @inline(__always) public static func binary( _ a: Tensor, @@ -3713,6 +2962,34 @@ public static func clipByValue( return Tensor(handle: ret) } +/// An Op to permute tensors across replicated TPU instances. +/// +/// Each instance supplies its own input. +/// +/// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing +/// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs: +/// `[D, A, B, C]`. +/// +/// - Parameters: +/// - input: The local input to be permuted. Currently only supports float and +/// bfloat16. 
+/// - source_target_pairs: A tensor with shape [num_pairs, 2]. +/// +/// - Attr T: The type of elements to be exchanged. +/// +/// - Output output: The permuted input. +@inlinable @inline(__always) +public static func collectivePermute( + _ input: Tensor, + sourceTargetPairs: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("CollectivePermute", + input, + sourceTargetPairs, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + /// Mutually reduces multiple tensors of identical type and shape. @inlinable @inline(__always) public static func collectiveReduce( @@ -3722,7 +2999,8 @@ public static func collectiveReduce( instanceKey: Int64, mergeOp: MergeOp, finalOp: FinalOp, - subdivOffsets: [Int32] + subdivOffsets: [Int32], + waitFor: [Int32] ) -> Tensor { let ret: TensorHandle = #tfop("CollectiveReduce", input, @@ -3732,10 +3010,79 @@ public static func collectiveReduce( instance_key: instanceKey, merge_op: mergeOp.cName, final_op: finalOp.cName, - subdiv_offsets: subdivOffsets) + subdiv_offsets: subdivOffsets, + wait_for: waitFor) return Tensor(handle: ret) } +/// Greedily selects a subset of bounding boxes in descending order of score, +/// +/// This operation performs non_max_suppression on the inputs per batch, across +/// all classes. +/// Prunes away boxes that have high intersection-over-union (IOU) overlap +/// with previously selected boxes. Bounding boxes are supplied as +/// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any +/// diagonal pair of box corners and the coordinates can be provided as normalized +/// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm +/// is agnostic to where the origin is in the coordinate system. Also note that +/// this algorithm is invariant to orthogonal transformations and translations +/// of the coordinate system; thus translating or reflections of the coordinate +/// system result in the same boxes being selected by the algorithm. +/// The output of this operation is the final boxes, scores and classes tensor +/// returned after performing non_max_suppression. +/// +/// - Parameters: +/// - boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then +/// same boxes are used for all classes otherwise, if `q` is equal to number of +/// classes, class-specific boxes are used. +/// - scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]` +/// representing a single score corresponding to each box (each row of boxes). +/// - max_output_size_per_class: A scalar integer tensor representing the maximum number of +/// boxes to be selected by non max suppression per class +/// - max_total_size: A scalar representing maximum number of boxes retained over all classes. +/// - iou_threshold: A 0-D float tensor representing the threshold for deciding whether +/// boxes overlap too much with respect to IOU. +/// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove +/// boxes based on score. +/// +/// - Attr pad_per_class: If false, the output nmsed boxes, scores and classes +/// are padded/clipped to `max_total_size`. If true, the +/// output nmsed boxes, scores and classes are padded to be of length +/// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in +/// which case it is clipped to `max_total_size`. Defaults to false. +/// +/// - Outputs: +/// - nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor +/// containing the non-max suppressed boxes. 
+/// - nmsed_scores: A [batch_size, max_detections] float32 tensor +/// containing the scores for the boxes. +/// - nmsed_classes: A [batch_size, max_detections] float32 tensor +/// containing the classes for the boxes. +/// - valid_detections: A [batch_size] int32 tensor indicating the number of +/// valid detections per batch item. Only the top num_detections[i] entries in +/// nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the +/// entries are zero paddings. +@inlinable @inline(__always) +public static func combinedNonMaxSuppression( + boxes: Tensor, + scores: Tensor, + maxOutputSizePerClass: Tensor, + maxTotalSize: Tensor, + iouThreshold: Tensor, + scoreThreshold: Tensor, + padPerClass: Bool = false +) -> (nmsedBoxes: Tensor, nmsedScores: Tensor, nmsedClasses: Tensor, validDetections: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("CombinedNonMaxSuppression", + boxes, + scores, + maxOutputSizePerClass, + maxTotalSize, + iouThreshold, + scoreThreshold, + pad_per_class: padPerClass) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + /// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. /// /// Each comparison returns a boolean `true` (if `input_value > threshold`) @@ -3915,6 +3262,29 @@ public static func concatV2 StringTensor { + let ret: TensorHandle = #tfop("ConfigureDistributedTPU", + embedding_config: embeddingConfig, + tpu_embedding_config: tpuEmbeddingConfig, + is_global_init: isGlobalInit) + return StringTensor(handle: ret) +} + /// Returns the complex conjugate of a complex number. /// /// Given a tensor `input` of complex numbers, this operation returns a tensor of @@ -3973,6 +3343,29 @@ public static func controlTrigger( return #tfop("ControlTrigger") } +/// Computes a 2-D convolution given 4-D `input` and `filter` tensors. +/// +/// Given an input tensor of shape `[batch, in_height, in_width, in_channels]` +/// and a filter / kernel tensor of shape +/// `[filter_height, filter_width, in_channels, out_channels]`, this op +/// performs the following: +/// +/// 1. Flattens the filter to a 2-D matrix with shape +/// `[filter_height * filter_width * in_channels, output_channels]`. +/// 2. Extracts image patches from the input tensor to form a *virtual* +/// tensor of shape `[batch, out_height, out_width, +/// filter_height * filter_width * in_channels]`. +/// 3. For each patch, right-multiplies the filter matrix and the image patch +/// vector. +/// +/// In detail, with the default NHWC format, +/// +/// output[b, i, j, k] = +/// sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * +/// filter[di, dj, q, k] +/// +/// Must have `strides[0] = strides[3] = 1`. For the most common case of the same +/// horizontal and vertices strides, `strides = [1, stride, stride, 1]`. /// /// - Parameters: /// - input: A 4-D tensor. The dimension order is interpreted according to the value @@ -3985,6 +3378,20 @@ public static func controlTrigger( /// dimension of `input`. The dimension order is determined by the value of /// `data_format`, see below for details. /// - padding: The type of padding algorithm to use. +/// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith +/// dimension, the amount of padding inserted before and after the dimension is +/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. 
If
+/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
+/// - data_format: Specify the data format of the input and output data. With the
+/// default format "NHWC", the data is stored in the order of:
+/// [batch, height, width, channels].
+/// Alternatively, the format could be "NCHW", the data storage order of:
+/// [batch, channels, height, width].
+/// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+/// `input`. If set to k > 1, there will be k-1 skipped cells between each
+/// filter element on that dimension. The dimension order is determined by the
+/// value of `data_format`, see above for details. Dilations in the batch and
+/// depth dimensions must be 1.
///
/// - Output output: A 4-D tensor. The dimension order is determined by the value of
/// `data_format`, see below for details.
@@ -3994,8 +3401,9 @@ public static func conv2D(
  filter: Tensor,
  strides: [Int32],
  useCudnnOnGpu: Bool = true,
-  padding: Padding,
-  dataFormat: DataFormat = .nhwc,
+  padding: Padding2,
+  explicitPaddings: [Int32],
+  dataFormat: DataFormat = .nhwc,
  dilations: [Int32] = [1, 1, 1, 1]
) -> Tensor {
  let ret: TensorHandle = #tfop("Conv2D",
@@ -4005,11 +3413,13 @@ public static func conv2D(
    strides: strides,
    use_cudnn_on_gpu: useCudnnOnGpu,
    padding: padding.cName,
+    explicit_paddings: explicitPaddings,
    data_format: dataFormat.cName,
    dilations: dilations)
  return Tensor(handle: ret)
}

+/// Computes the gradients of convolution with respect to the filter.
///
/// - Parameters:
/// - input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
///
@@ -4024,6 +3434,20 @@ public static func conv2D(
/// of the convolution. Must be in the same order as the dimension specified with
/// format.
/// - padding: The type of padding algorithm to use.
+/// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
+/// dimension, the amount of padding inserted before and after the dimension is
+/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
+/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
+/// - data_format: Specify the data format of the input and output data. With the
+/// default format "NHWC", the data is stored in the order of:
+/// [batch, in_height, in_width, in_channels].
+/// Alternatively, the format could be "NCHW", the data storage order of:
+/// [batch, in_channels, in_height, in_width].
+/// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+/// element on that dimension. The dimension order is determined by the value of
+/// `data_format`, see above for details. Dilations in the batch and depth
+/// dimensions must be 1.
///
/// - Output output: 4-D with shape
/// `[filter_height, filter_width, in_channels, out_channels]`. Gradient w.r.t.
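The explicit padding convention documented above takes some unpacking, so here is a minimal usage sketch. It assumes the `Raw.conv2D` wrapper from the hunk above, that the `Padding2.explicit` case maps to the `"EXPLICIT"` C name, and illustrative tensor shapes; none of these specifics are prescribed by the diff itself.

    // Hypothetical example: pad height and width by 1 on each side, NHWC layout.
    // `explicitPaddings` holds [before, after] pairs per dimension, so for
    // [batch, height, width, channels] the batch and channel entries stay 0.
    let image = Tensor<Float>(ones: [1, 8, 8, 3])     // NHWC input
    let kernel = Tensor<Float>(ones: [3, 3, 3, 16])   // HWIO filter
    let output = Raw.conv2D(
      image,
      filter: kernel,
      strides: [1, 1, 1, 1],
      padding: .explicit,
      explicitPaddings: [0, 0, 1, 1, 1, 1, 0, 0],
      dataFormat: .nhwc)
    // Expected output shape: [1, 8, 8, 16], since 8 + 1 + 1 - 3 + 1 = 8 per spatial dim.
    // With `.same` or `.valid` padding, `explicitPaddings` would be the empty array.
    print(output.shape)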
@@ -4035,8 +3459,9 @@ public static func conv2DBackpropFilter(
  outBackprop: Tensor,
  strides: [Int32],
  useCudnnOnGpu: Bool = true,
-  padding: Padding,
-  dataFormat: DataFormat = .nhwc,
+  padding: Padding2,
+  explicitPaddings: [Int32],
+  dataFormat: DataFormat = .nhwc,
  dilations: [Int32] = [1, 1, 1, 1]
) -> Tensor {
  let ret: TensorHandle = #tfop("Conv2DBackpropFilter",
@@ -4047,11 +3472,13 @@ public static func conv2DBackpropFilter(
    strides: strides,
    use_cudnn_on_gpu: useCudnnOnGpu,
    padding: padding.cName,
+    explicit_paddings: explicitPaddings,
    data_format: dataFormat.cName,
    dilations: dilations)
  return Tensor(handle: ret)
}

+/// Computes the gradients of convolution with respect to the input.
///
/// - Parameters:
/// - input_sizes: An integer vector representing the shape of `input`,
///
@@ -4066,6 +3493,20 @@ public static func conv2DBackpropFilter(
/// of the convolution. Must be in the same order as the dimension specified with
/// format.
/// - padding: The type of padding algorithm to use.
+/// - explicit_paddings: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
+/// dimension, the amount of padding inserted before and after the dimension is
+/// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
+/// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
+/// - data_format: Specify the data format of the input and output data. With the
+/// default format "NHWC", the data is stored in the order of:
+/// [batch, in_height, in_width, in_channels].
+/// Alternatively, the format could be "NCHW", the data storage order of:
+/// [batch, in_channels, in_height, in_width].
+/// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of
+/// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
+/// element on that dimension. The dimension order is determined by the value of
+/// `data_format`, see above for details. Dilations in the batch and depth
+/// dimensions must be 1.
///
/// - Output output: 4-D with shape `[batch, in_height, in_width, in_channels]`. Gradient
/// w.r.t. the input of the convolution.
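For the gradient wrappers, the convolution attributes are expected to mirror the forward call exactly. Below is a rough sketch of computing the filter gradient with the wrapper above; the `filterSizes` argument label, the upstream-gradient shape, and the surrounding values are illustrative assumptions, not taken from this diff.

    // Hypothetical filter-gradient call matching the forward conv2D sketch:
    // same strides, padding, explicitPaddings and dataFormat as the forward op.
    let fwdInput = Tensor<Float>(ones: [1, 8, 8, 3])        // NHWC forward input
    let gradFromAbove = Tensor<Float>(ones: [1, 8, 8, 16])  // d(loss)/d(conv output)
    let filterGrad = Raw.conv2DBackpropFilter(
      fwdInput,
      filterSizes: Tensor<Int32>([3, 3, 3, 16]),            // shape of the filter being differentiated
      outBackprop: gradFromAbove,
      strides: [1, 1, 1, 1],
      padding: .explicit,
      explicitPaddings: [0, 0, 1, 1, 1, 1, 0, 0],
      dataFormat: .nhwc)
    print(filterGrad.shape)                                 // expected: [3, 3, 3, 16]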
@@ -4076,8 +3517,9 @@ public static func conv2DBackpropInput( outBackprop: Tensor, strides: [Int32], useCudnnOnGpu: Bool = true, - padding: Padding, - dataFormat: DataFormat = .nhwc, + padding: Padding2, + explicitPaddings: [Int32], + dataFormat: DataFormat = .b'nhwc', dilations: [Int32] = [1, 1, 1, 1] ) -> Tensor { let ret: TensorHandle = #tfop("Conv2DBackpropInput", @@ -4088,6 +3530,7 @@ public static func conv2DBackpropInput( strides: strides, use_cudnn_on_gpu: useCudnnOnGpu, padding: padding.cName, + explicit_paddings: explicitPaddings, data_format: dataFormat.cName, dilations: dilations) return Tensor(handle: ret) @@ -4126,7 +3569,7 @@ public static func conv3D( filter: Tensor, strides: [Int32], padding: Padding, - dataFormat: DataFormat1 = .ndhwc, + dataFormat: DataFormat1 = .b'ndhwc', dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor { let ret: TensorHandle = #tfop("Conv3D", @@ -4205,7 +3648,7 @@ public static func conv3DBackpropFilterV2( outBackprop: Tensor, strides: [Int32], padding: Padding, - dataFormat: DataFormat1 = .ndhwc, + dataFormat: DataFormat1 = .b'ndhwc', dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor { let ret: TensorHandle = #tfop("Conv3DBackpropFilterV2", @@ -4285,7 +3728,7 @@ public static func conv3DBackpropInputV2, strides: [Int32], padding: Padding, - dataFormat: DataFormat1 = .ndhwc, + dataFormat: DataFormat1 = .b'ndhwc', dilations: [Int32] = [1, 1, 1, 1, 1] ) -> Tensor { let ret: TensorHandle = #tfop("Conv3DBackpropInputV2", @@ -4403,27 +3846,6 @@ public static func cosh( return Tensor(handle: ret) } -/// Increments 'ref' until it reaches 'limit'. -/// -/// - Parameter ref: Should be from a scalar `Variable` node. -/// -/// - Attr limit: If incrementing ref would bring it above limit, instead generates an -/// 'OutOfRange' error. -/// -/// - Output output: A copy of the input before increment. If nothing else modifies the -/// input, the values produced will all be distinct. -@inlinable @inline(__always) -public static func countUpTo( - ref: Tensor, - limit: Int64 -) -> Tensor { - let ret: TensorHandle = #tfop("CountUpTo", - ref, - T$dtype: T.tensorFlowDataType, - limit: limit) - return Tensor(handle: ret) -} - /// Extracts crops from the input image tensor and resizes them. /// /// Extracts crops from the input image tensor and resizes them using bilinear @@ -4475,7 +3897,7 @@ public static func cropAndResize( boxes: Tensor, boxInd: Tensor, cropSize: Tensor, - method: Method = .bilinear, + method: Method = .b'bilinear', extrapolationValue: Double = 0 ) -> Tensor { let ret: TensorHandle = #tfop("CropAndResize", @@ -4518,7 +3940,7 @@ public static func cropAndResizeGradBoxes( image: Tensor, boxes: Tensor, boxInd: Tensor, - method: Method2 = .bilinear + method: Method3 = .b'bilinear' ) -> Tensor { let ret: TensorHandle = #tfop("CropAndResizeGradBoxes", grads, @@ -4560,7 +3982,7 @@ public static func cropAndResizeGradImage( boxes: Tensor, boxInd: Tensor, imageSize: Tensor, - method: Method = .bilinear + method: Method = .b'bilinear' ) -> Tensor { let ret: TensorHandle = #tfop("CropAndResizeGradImage", grads, @@ -4595,6 +4017,36 @@ public static func cross( return Tensor(handle: ret) } +/// An Op to sum inputs across replicated TPU instances. +/// +/// Each instance supplies its own input. +/// +/// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`. +/// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0, +/// and `B, D, F, H` as group 1. 
Thus we get the outputs: +/// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`. +/// +/// - Parameters: +/// - input: The local input to the sum. +/// - group_assignment: An int32 tensor with shape +/// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the +/// replica ids in the ith subgroup. +/// +/// - Attr T: The type of elements to be summed. +/// +/// - Output output: The sum of all the distributed inputs. +@inlinable @inline(__always) +public static func crossReplicaSum( + _ input: Tensor, + groupAssignment: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("CrossReplicaSum", + input, + groupAssignment, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + /// A RNN backed by cuDNN. /// /// Computes the RNN from the input and initial states, with respect to the params @@ -4633,9 +4085,9 @@ public static func cudnnRNN( inputH: Tensor, inputC: Tensor, params: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', dropout: Double = 0, seed: Int64 = 0, seed2: Int64 = 0, @@ -4711,9 +4163,9 @@ public static func cudnnRNNBackprop( outputHBackprop: Tensor, outputCBackprop: Tensor, reserveSpace: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', dropout: Double = 0, seed: Int64 = 0, seed2: Int64 = 0 @@ -4798,9 +4250,9 @@ public static func cudnnRNNBackpropV2( outputCBackprop: Tensor, reserveSpace: Tensor, hostReserved: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', dropout: Double = 0, seed: Int64 = 0, seed2: Int64 = 0 @@ -4828,60 +4280,157 @@ public static func cudnnRNNBackpropV2( return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) } -/// Converts CudnnRNN params from canonical form to usable form. +/// Backprop step of CudnnRNNV3. /// -/// Writes a set of weights into the opaque params buffer so they can be used in -/// upcoming training or inferences. -/// -/// Note that the params buffer may not be compatible across different GPUs. So any -/// save and restoration should be converted to and from the canonical weights and -/// biases. +/// Compute the backprop of both data and weights in a RNN. Takes an extra +/// "sequence_lengths" input than CudnnRNNBackprop. /// -/// num_layers: Specifies the number of layers in the RNN model. -/// num_units: Specifies the size of the hidden state. -/// input_size: Specifies the size of the input state. -/// weights: the canonical form of weights that can be used for saving -/// and restoration. They are more likely to be compatible across different -/// generations. -/// biases: the canonical form of biases that can be used for saving -/// and restoration. They are more likely to be compatible across different -/// generations. -/// num_params: number of parameter sets for all layers. -/// Each layer may contain multiple parameter sets, with each set consisting of -/// a weight matrix and a bias vector. /// rnn_mode: Indicates the type of the RNN model. 
-/// input_mode: Indicate whether there is a linear projection between the input and -/// The actual computation before the first layer. 'skip_input' is only allowed +/// input_mode: Indicates whether there is a linear projection between the input and +/// the actual computation before the first layer. 'skip_input' is only allowed /// when input_size == num_units; 'auto_select' implies 'skip_input' when /// input_size == num_units; otherwise, it implies 'linear_input'. -/// direction: Indicates whether a bidirectional model will be used. -/// dir = (direction == bidirectional) ? 2 : 1 -/// dropout: dropout probability. When set to 0., dropout is disabled. -/// seed: the 1st part of a seed to initialize dropout. -/// seed2: the 2nd part of a seed to initialize dropout. -@inlinable @inline(__always) -public static func cudnnRNNCanonicalToParams( - numLayers: Tensor, - numUnits: Tensor, - inputSize: Tensor, - weights: [Tensor], - biases: [Tensor], - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, - dropout: Double = 0, - seed: Int64 = 0, - seed2: Int64 = 0 -) -> Tensor { - let ret: TensorHandle = #tfop("CudnnRNNCanonicalToParams", - numLayers, - numUnits, - inputSize, - weights, - biases, - T$dtype: T.tensorFlowDataType, - rnn_mode: rnnMode.cName, - input_mode: inputMode.cName, +/// direction: Indicates whether a bidirectional model will be used. Should be +/// "unidirectional" or "bidirectional". +/// dropout: Dropout probability. When set to 0., dropout is disabled. +/// seed: The 1st part of a seed to initialize dropout. +/// seed2: The 2nd part of a seed to initialize dropout. +/// input: If time_major is true, this is a 3-D tensor with the shape of +/// [seq_length, batch_size, input_size]. If time_major is false, the shape is +/// [batch_size, seq_length, input_size]. +/// input_h: If time_major is true, this is a 3-D tensor with the shape of +/// [num_layer * dir, batch_size, num_units]. If time_major is false, the shape +/// is [batch_size, num_layer * dir, num_units]. +/// input_c: For LSTM, a 3-D tensor with the shape of +/// [num_layer * dir, batch, num_units]. For other models, it is ignored. +/// params: A 1-D tensor that contains the weights and biases in an opaque layout. +/// The size must be created through CudnnRNNParamsSize, and initialized +/// separately. Note that they might not be compatible across different +/// generations. So it is a good idea to save and restore +/// sequence_lengths: a vector of lengths of each input sequence. +/// output: If time_major is true, this is a 3-D tensor with the shape of +/// [seq_length, batch_size, dir * num_units]. If time_major is false, the +/// shape is [batch_size, seq_length, dir * num_units]. +/// output_h: The same shape has input_h. +/// output_c: The same shape as input_c for LSTM. An empty tensor for other models. +/// output_backprop: A 3-D tensor with the same shape as output in the forward pass. +/// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward +/// pass. +/// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward +/// pass. +/// time_major: Indicates whether the input/output format is time major or batch +/// major. +/// reserve_space: The same reserve_space produced in the forward operation. +/// input_backprop: The backprop to input in the forward pass. Has the same shape +/// as input. +/// input_h_backprop: The backprop to input_h in the forward pass. Has the same +/// shape as input_h. 
+/// input_c_backprop: The backprop to input_c in the forward pass. Has the same +/// shape as input_c. +/// params_backprop: The backprop to the params buffer in the forward pass. Has the +/// same shape as params. +@inlinable @inline(__always) +public static func cudnnRNNBackpropV3( + _ input: Tensor, + inputH: Tensor, + inputC: Tensor, + params: Tensor, + sequenceLengths: Tensor, + output: Tensor, + outputH: Tensor, + outputC: Tensor, + outputBackprop: Tensor, + outputHBackprop: Tensor, + outputCBackprop: Tensor, + reserveSpace: Tensor, + hostReserved: Tensor, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', + dropout: Double = 0, + seed: Int64 = 0, + seed2: Int64 = 0, + timeMajor: Bool = true +) -> (inputBackprop: Tensor, inputHBackprop: Tensor, inputCBackprop: Tensor, paramsBackprop: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("CudnnRNNBackpropV3", + input, + inputH, + inputC, + params, + sequenceLengths, + output, + outputH, + outputC, + outputBackprop, + outputHBackprop, + outputCBackprop, + reserveSpace, + hostReserved, + T$dtype: T.tensorFlowDataType, + rnn_mode: rnnMode.cName, + input_mode: inputMode.cName, + direction: direction.cName, + dropout: dropout, + seed: seed, + seed2: seed2, + time_major: timeMajor) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Converts CudnnRNN params from canonical form to usable form. +/// +/// Writes a set of weights into the opaque params buffer so they can be used in +/// upcoming training or inferences. +/// +/// Note that the params buffer may not be compatible across different GPUs. So any +/// save and restoration should be converted to and from the canonical weights and +/// biases. +/// +/// num_layers: Specifies the number of layers in the RNN model. +/// num_units: Specifies the size of the hidden state. +/// input_size: Specifies the size of the input state. +/// weights: the canonical form of weights that can be used for saving +/// and restoration. They are more likely to be compatible across different +/// generations. +/// biases: the canonical form of biases that can be used for saving +/// and restoration. They are more likely to be compatible across different +/// generations. +/// num_params: number of parameter sets for all layers. +/// Each layer may contain multiple parameter sets, with each set consisting of +/// a weight matrix and a bias vector. +/// rnn_mode: Indicates the type of the RNN model. +/// input_mode: Indicate whether there is a linear projection between the input and +/// The actual computation before the first layer. 'skip_input' is only allowed +/// when input_size == num_units; 'auto_select' implies 'skip_input' when +/// input_size == num_units; otherwise, it implies 'linear_input'. +/// direction: Indicates whether a bidirectional model will be used. +/// dir = (direction == bidirectional) ? 2 : 1 +/// dropout: dropout probability. When set to 0., dropout is disabled. +/// seed: the 1st part of a seed to initialize dropout. +/// seed2: the 2nd part of a seed to initialize dropout. 
+@inlinable @inline(__always) +public static func cudnnRNNCanonicalToParams( + numLayers: Tensor, + numUnits: Tensor, + inputSize: Tensor, + weights: [Tensor], + biases: [Tensor], + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', + dropout: Double = 0, + seed: Int64 = 0, + seed2: Int64 = 0 +) -> Tensor { + let ret: TensorHandle = #tfop("CudnnRNNCanonicalToParams", + numLayers, + numUnits, + inputSize, + weights, + biases, + T$dtype: T.tensorFlowDataType, + rnn_mode: rnnMode.cName, + input_mode: inputMode.cName, direction: direction.cName, dropout: dropout, seed: seed, @@ -4917,9 +4466,9 @@ public static func cudnnRNNParamsSize, numUnits: Tensor, inputSize: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', dropout: Double = 0, seed: Int64 = 0, seed2: Int64 = 0, @@ -4981,9 +4530,9 @@ public static func cudnnRNNV2( inputH: Tensor, inputC: Tensor, params: Tensor, - rnnMode: RnnMode = .lstm, - inputMode: InputMode = .linearInput, - direction: Direction = .unidirectional, + rnnMode: RnnMode = .b'lstm', + inputMode: InputMode = .b'linearInput', + direction: Direction = .b'unidirectional', dropout: Double = 0, seed: Int64 = 0, seed2: Int64 = 0, @@ -5005,6 +4554,79 @@ public static func cudnnRNNV2( return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4)) } +/// A RNN backed by cuDNN. +/// +/// Computes the RNN from the input and initial states, with respect to the params +/// buffer. Accepts one extra input "sequence_lengths" than CudnnRNN. +/// +/// rnn_mode: Indicates the type of the RNN model. +/// input_mode: Indicates whether there is a linear projection between the input and +/// the actual computation before the first layer. 'skip_input' is only allowed +/// when input_size == num_units; 'auto_select' implies 'skip_input' when +/// input_size == num_units; otherwise, it implies 'linear_input'. +/// direction: Indicates whether a bidirectional model will be used. Should be +/// "unidirectional" or "bidirectional". +/// dropout: Dropout probability. When set to 0., dropout is disabled. +/// seed: The 1st part of a seed to initialize dropout. +/// seed2: The 2nd part of a seed to initialize dropout. +/// input: If time_major is true, this is a 3-D tensor with the shape of +/// [seq_length, batch_size, input_size]. If time_major is false, the shape is +/// [batch_size, seq_length, input_size]. +/// input_h: If time_major is true, this is a 3-D tensor with the shape of +/// [num_layer * dir, batch_size, num_units]. If time_major is false, the shape +/// is [batch_size, num_layer * dir, num_units]. +/// input_c: For LSTM, a 3-D tensor with the shape of +/// [num_layer * dir, batch, num_units]. For other models, it is ignored. +/// params: A 1-D tensor that contains the weights and biases in an opaque layout. +/// The size must be created through CudnnRNNParamsSize, and initialized +/// separately. Note that they might not be compatible across different +/// generations. So it is a good idea to save and restore +/// sequence_lengths: a vector of lengths of each input sequence. +/// output: If time_major is true, this is a 3-D tensor with the shape of +/// [seq_length, batch_size, dir * num_units]. If time_major is false, the +/// shape is [batch_size, seq_length, dir * num_units]. 
+/// output_h: The same shape as input_h.
+/// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+/// is_training: Indicates whether this operation is used for inference or
+/// training.
+/// time_major: Indicates whether the input/output format is time major or batch
+/// major.
+/// reserve_space: An opaque tensor that can be used in backprop calculation. It
+/// is only produced if is_training is true.
+@inlinable @inline(__always)
+public static func cudnnRNNV3(
+  _ input: Tensor,
+  inputH: Tensor,
+  inputC: Tensor,
+  params: Tensor,
+  sequenceLengths: Tensor,
+  rnnMode: RnnMode = .lstm,
+  inputMode: InputMode = .linearInput,
+  direction: Direction = .unidirectional,
+  dropout: Double = 0,
+  seed: Int64 = 0,
+  seed2: Int64 = 0,
+  isTraining: Bool = true,
+  timeMajor: Bool = true
+) -> (output: Tensor, outputH: Tensor, outputC: Tensor, reserveSpace: Tensor, hostReserved: Tensor) {
+  let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("CudnnRNNV3",
+    input,
+    inputH,
+    inputC,
+    params,
+    sequenceLengths,
+    T$dtype: T.tensorFlowDataType,
+    rnn_mode: rnnMode.cName,
+    input_mode: inputMode.cName,
+    direction: direction.cName,
+    dropout: dropout,
+    seed: seed,
+    seed2: seed2,
+    is_training: isTraining,
+    time_major: timeMajor)
+  return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4))
+}
+
/// Compute the cumulative product of the tensor `x` along `axis`.
///
/// By default, this op performs an inclusive cumprod, which means that the first
@@ -5136,8 +4758,8 @@ public static func cumsum(
  _ x: Tensor,
-  srcFormat: String = "NHWC",
-  dstFormat: String = "NCHW"
+  srcFormat: String = "NHWC",
+  dstFormat: String = "NCHW"
) -> Tensor {
  let ret: TensorHandle = #tfop("DataFormatDimMap",
    x,
@@ -5161,8 +4783,8 @@ public static func dataFormatDimMap(
@inlinable @inline(__always)
public static func dataFormatVecPermute(
  _ x: Tensor,
-  srcFormat: String = "NHWC",
-  dstFormat: String = "NCHW"
+  srcFormat: String = "NHWC",
+  dstFormat: String = "NCHW"
) -> Tensor {
  let ret: TensorHandle = #tfop("DataFormatVecPermute",
    x,
@@ -5187,21 +4809,6 @@ public static func debugGradientIdentity(
  return Tensor(handle: ret)
}

-/// Identity op for gradient debugging.
-///
-/// This op is hidden from public in Python. It is used by TensorFlow Debugger to
-/// register gradient tensors for gradient debugging.
-/// This op operates on reference-type tensors.
-@inlinable @inline(__always)
-public static func debugGradientRefIdentity(
-  _ input: Tensor
-) -> Tensor {
-  let ret: TensorHandle = #tfop("DebugGradientRefIdentity",
-    input,
-    T$dtype: T.tensorFlowDataType)
-  return Tensor(handle: ret)
-}
-
/// Debug Identity Op.
///
/// Provides an identity mapping of the non-Ref type input tensor for debugging.
@@ -5483,10 +5090,11 @@ public static func decodeCompressed(
  return StringTensor(handle: ret)
}

-/// Decode the first frame of a GIF-encoded image to a uint8 tensor.
+/// Decode the frame(s) of a GIF-encoded image to a uint8 tensor.
///
-/// GIF with frame or transparency compression are not supported
-/// convert animated GIF from compressed to uncompressed by:
+/// GIF images with frame or transparency compression are not supported.
+/// On Linux and macOS systems, convert animated GIFs from compressed to
+/// uncompressed by running:
///
/// convert $src.gif -coalesce $dst.gif
///
@@ -5495,7 +5103,7 @@ public static func decodeCompressed(
///
/// - Parameter contents: 0-D. The GIF-encoded image.
///
-/// - Output image: 4-D with shape `[num_frames, height, width, 3]`. RGB order
+/// - Output image: 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
@inlinable @inline(__always)
public static func decodeGif(
  contents: StringTensor
@@ -5636,7 +5244,7 @@ public static func decodePng(
/// added dimension will have size equal to the length of the elements
/// of `bytes` divided by the number of bytes to represent `out_type`.
@inlinable @inline(__always)
-public static func decodeRaw(
+public static func decodeRaw(
  bytes: StringTensor,
  littleEndian: Bool = true
) -> Tensor {
@@ -5903,7 +5511,7 @@ public static func denseToSparseSetOperation(
  _ input: Tensor,
  blockSize: Int64,
-  dataFormat: DataFormat3 = .nhwc
+  dataFormat: DataFormat4 = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("DepthToSpace",
    input,
@@ -5954,7 +5562,7 @@ public static func depthwiseConv2dNative(
  filter: Tensor,
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat = .nhwc,
+  dataFormat: DataFormat = .nhwc,
  dilations: [Int32] = [1, 1, 1, 1]
) -> Tensor {
  let ret: TensorHandle = #tfop("DepthwiseConv2dNative",
@@ -6007,7 +5615,7 @@ public static func depthwiseConv2dNativeBackpropFilter(
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat = .nhwc,
+  dataFormat: DataFormat = .nhwc,
  dilations: [Int32] = [1, 1, 1, 1]
) -> Tensor {
  let ret: TensorHandle = #tfop("DepthwiseConv2dNativeBackpropFilter",
@@ -6061,7 +5669,7 @@ public static func depthwiseConv2dNativeBackpropInput(
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat = .nhwc,
+  dataFormat: DataFormat = .nhwc,
  dilations: [Int32] = [1, 1, 1, 1]
) -> Tensor {
  let ret: TensorHandle = #tfop("DepthwiseConv2dNativeBackpropInput",
@@ -6160,7 +5768,7 @@ public static func dequantize(
  _ input: Tensor,
  minRange: Tensor,
  maxRange: Tensor,
-  mode: Mode = .minCombined
+  mode: Mode = .minCombined
) -> Tensor {
  let ret: TensorHandle = #tfop("Dequantize",
    input,
@@ -6288,30 +5896,11 @@ public static func deserializeSparse(
-  ref: Tensor,
-  varName: String
-) -> Tensor {
-  let ret: TensorHandle = #tfop("DestroyTemporaryVariable",
-    ref,
-    T$dtype: T.tensorFlowDataType,
-    var_name: varName)
-  return Tensor(handle: ret)
+public static func devicePlacementOp(
+) -> StringTensor {
+  let ret: TensorHandle = #tfop("DevicePlacementOp")
+  return StringTensor(handle: ret)
}

/// Returns a diagonal tensor with given diagonal values.
@@ -6860,7 +6449,7 @@ public static func encodeJpeg(
  progressive: Bool = false,
  optimizeSize: Bool = false,
  chromaDownsampling: Bool = true,
-  densityUnit: DensityUnit = .in_,
+  densityUnit: DensityUnit = .in_,
  xDensity: Int64 = 300,
  yDensity: Int64 = 300,
  xmpMetadata: String
@@ -6879,6 +6468,28 @@ public static func encodeJpeg(
  return StringTensor(handle: ret)
}

+/// JPEG encode input image with provided compression quality.
+///
+/// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
+/// `quality` is an int32 jpeg compression quality value between 0 and 100.
+///
+///
+/// - Parameters:
+/// - images: Images to adjust. At least 3-D.
+/// - quality: An int quality to encode to.
+///
+/// - Output contents: 0-D. JPEG-encoded image.
+@inlinable @inline(__always) +public static func encodeJpegVariableQuality( + images: Tensor, + quality: Tensor +) -> StringTensor { + let ret: TensorHandle = #tfop("EncodeJpegVariableQuality", + images, + quality) + return StringTensor(handle: ret) +} + /// PNG-encode an image. /// /// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` @@ -6966,7 +6577,7 @@ public static func encodeProto( _ values: [Tensor], fieldNames: [String], messageType: String, - descriptorSource: String = "local://" + descriptorSource: String = "b'local://'" ) -> StringTensor { let ret: TensorHandle = #tfop("EncodeProto", sizes, @@ -7003,6 +6614,148 @@ public static func encodeWav( return StringTensor(handle: ret) } +/// An op that enqueues a list of input batch tensors to TPUEmbedding. +/// +/// - Parameters: +/// - batch: A list of 1D tensors, one for each embedding table, containing the +/// indices into the tables. +/// - mode_override: A string input that overrides the mode specified in the +/// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', +/// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set +/// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. +/// +/// - Attr device_ordinal: The TPU device to use. Should be >= 0 and less than the number +/// of TPU cores in the task on which the node is placed. +@inlinable @inline(__always) +public static func enqueueTPUEmbeddingIntegerBatch( + batch: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1 +) { + return #tfop("EnqueueTPUEmbeddingIntegerBatch", + batch, + modeOverride, + device_ordinal: deviceOrdinal) +} + +/// An op that enqueues TPUEmbedding input indices from a SparseTensor. +/// +/// This Op eases the porting of code that uses embedding_lookup_sparse(), +/// although some Python preprocessing of the SparseTensor arguments to +/// embedding_lookup_sparse() is required to produce the arguments to this Op, +/// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training +/// step. +/// +/// The tensors at corresponding positions in the three input lists +/// must have the same shape, i.e. rank 1 with dim_size() equal to the total +/// number of lookups into the table described by the corresponding table_id. +/// +/// - Parameters: +/// - sample_indices: A list of rank 1 Tensors specifying the training example and +/// feature to which the corresponding embedding_indices and aggregation_weights +/// values belong. sample_indices[i] must equal b * nf + f, where nf is the +/// number of features from the corresponding table, f is in [0, nf), and +/// b is in [0, batch size). +/// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. +/// - aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per +/// (training example, feature) -- aggregation weights. +/// - mode_override: A string input that overrides the mode specified in the +/// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', +/// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set +/// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. +/// +/// - Attrs: +/// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number +/// of TPU cores in the task on which the node is placed. 
+/// - combiners: A list of string scalars, one for each embedding table that specify +/// how to normalize the embedding activations after weighted summation. +/// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have +/// the sum of the weights be 0 for 'mean' or the sum of the squared weights be +/// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for +/// all tables. +@inlinable @inline(__always) +public static func enqueueTPUEmbeddingSparseBatch( + sampleIndices: [Tensor], + embeddingIndices: [Tensor], + aggregationWeights: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1, + combiners: [String] +) { + return #tfop("EnqueueTPUEmbeddingSparseBatch", + sampleIndices, + embeddingIndices, + aggregationWeights, + modeOverride, + T1$dtype: T1.tensorFlowDataType, + T2$dtype: T2.tensorFlowDataType, + T3$dtype: T3.tensorFlowDataType, + device_ordinal: deviceOrdinal, + combiners: combiners) +} + +/// Eases the porting of code that uses tf.nn.embedding_lookup_sparse(). +/// +/// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond +/// to the ith feature. table_ids[i] indicates which embedding table to look up ith +/// feature. +/// +/// The tensors at corresponding positions in the three input lists (sample_indices, +/// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1 +/// with dim_size() equal to the total number of lookups into the table described by +/// the corresponding feature. +/// +/// - Parameters: +/// - sample_indices: A list of rank 1 Tensors specifying the training example to +/// which the corresponding embedding_indices and aggregation_weights values +/// belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse(). +/// - embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. +/// It corresponds to sp_ids.values in embedding_lookup_sparse(). +/// - aggregation_weights: A list of rank 1 Tensors containing per training example +/// aggregation weights. It corresponds to sp_weights.values in +/// embedding_lookup_sparse(). +/// - mode_override: A string input that overrides the mode specified in the +/// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', +/// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set +/// in TPUEmbeddingConfiguration is used, otherwise mode_override is used. +/// +/// - Attrs: +/// - device_ordinal: The TPU device to use. Should be >= 0 and less than the number +/// of TPU cores in the task on which the node is placed. +/// - combiners: A list of string scalars, one for each embedding table that specify +/// how to normalize the embedding activations after weighted summation. +/// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have +/// the sum of the weights be 0 for 'mean' or the sum of the squared weights be +/// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for +/// all tables. +/// - table_ids: A list of integers specifying the identifier of the embedding table +/// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the +/// corresponding input. The ith input is looked up using table_ids[i]. The size +/// of the table_ids list must be equal to that of sample_indices, +/// embedding_indices and aggregation_weights. 
+@inlinable @inline(__always) +public static func enqueueTPUEmbeddingSparseTensorBatch( + sampleIndices: [Tensor], + embeddingIndices: [Tensor], + aggregationWeights: [Tensor], + modeOverride: StringTensor, + deviceOrdinal: Int64 = -1, + combiners: [String], + tableIds: [Int32] +) { + return #tfop("EnqueueTPUEmbeddingSparseTensorBatch", + sampleIndices, + embeddingIndices, + aggregationWeights, + modeOverride, + T1$dtype: T1.tensorFlowDataType, + T2$dtype: T2.tensorFlowDataType, + T3$dtype: T3.tensorFlowDataType, + device_ordinal: deviceOrdinal, + combiners: combiners, + table_ids: tableIds) +} + /// Creates or finds a child frame, and makes `data` available to the child frame. /// /// This op is used together with `Exit` to create loops in the graph. @@ -7073,6 +6826,36 @@ public static func erfc( return Tensor(handle: ret) } +/// Computes the euclidean norm of elements across dimensions of a tensor. +/// +/// Reduces `input` along the dimensions given in `axis`. Unless +/// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in +/// `axis`. If `keep_dims` is true, the reduced dimensions are +/// retained with length 1. +/// +/// - Parameters: +/// - input: The tensor to reduce. +/// - reduction_indices: The dimensions to reduce. Must be in the range +/// `[-rank(input), rank(input))`. +/// +/// - Attr keep_dims: If true, retain reduced dimensions with length 1. +/// +/// - Output output: The reduced tensor. +@inlinable @inline(__always) +public static func euclideanNorm( + _ input: Tensor, + reductionIndices: Tensor, + keepDims: Bool = false +) -> Tensor { + let ret: TensorHandle = #tfop("EuclideanNorm", + input, + reductionIndices, + T$dtype: T.tensorFlowDataType, + Tidx$dtype: Tidx.tensorFlowDataType, + keep_dims: keepDims) + return Tensor(handle: ret) +} + /// Exits the current frame to its parent frame. /// /// Exit makes its input `data` available to the parent frame. @@ -7166,6 +6949,29 @@ public static func expm1( return Tensor(handle: ret) } +/// Extracts a glimpse from the input tensor. +/// +/// Returns a set of windows called glimpses extracted at location +/// `offsets` from the input tensor. If the windows only partially +/// overlaps the inputs, the non overlapping areas will be filled with +/// random noise. +/// +/// The result is a 4-D tensor of shape `[batch_size, glimpse_height, +/// glimpse_width, channels]`. The channels and batch dimensions are the +/// same as that of the input tensor. The height and width of the output +/// windows are specified in the `size` parameter. +/// +/// The argument `normalized` and `centered` controls how the windows are built: +/// +/// * If the coordinates are normalized but not centered, 0.0 and 1.0 +/// correspond to the minimum and maximum of each height and width +/// dimension. +/// * If the coordinates are both normalized and centered, they range from +/// -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper +/// left corner, the lower right corner is located at (1.0, 1.0) and the +/// center is at (0, 0). +/// * If the coordinates are not normalized they are interpreted as +/// numbers of pixels. /// /// - Parameters: /// - input: A 4-D float tensor of shape `[batch_size, height, width, channels]`. @@ -7183,6 +6989,9 @@ public static func expm1( /// - normalized: indicates if the offset coordinates are normalized. /// - uniform_noise: indicates if the noise should be generated using a /// uniform distribution or a Gaussian distribution. 
+/// - noise: indicates if the noise should `uniform`, `gaussian`, or +/// `zero`. The default is `uniform` which means the the noise type +/// will be decided by `uniform_noise`. /// /// - Output glimpse: A tensor representing the glimpses `[batch_size, /// glimpse_height, glimpse_width, channels]`. @@ -7193,7 +7002,8 @@ public static func extractGlimpse( offsets: Tensor, centered: Bool = true, normalized: Bool = true, - uniformNoise: Bool = true + uniformNoise: Bool = true, + noise: String = "b'uniform'" ) -> Tensor { let ret: TensorHandle = #tfop("ExtractGlimpse", input, @@ -7201,7 +7011,8 @@ public static func extractGlimpse( offsets, centered: centered, normalized: normalized, - uniform_noise: uniformNoise) + uniform_noise: uniformNoise, + noise: noise) return Tensor(handle: ret) } @@ -7394,6 +7205,15 @@ public static func fact( /// then de-quantized and output as floats in `[min; max]` interval. /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. /// +/// Before quantization, `min` and `max` values are adjusted with the following +/// logic. +/// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, +/// the behavior can be unexpected: +/// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. +/// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. +/// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, +/// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. +/// /// Quantization is called fake since the output is still in floating point. @inlinable @inline(__always) public static func fakeQuantWithMinMaxArgs( @@ -7449,6 +7269,15 @@ public static func fakeQuantWithMinMaxArgsGradient( /// then de-quantized and output as floats in `[min; max]` interval. /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. /// +/// Before quantization, `min` and `max` values are adjusted with the following +/// logic. +/// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, +/// the behavior can be unexpected: +/// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. +/// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. +/// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, +/// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. +/// /// This operation has a gradient and thus allows for training `min` and `max` /// values. @inlinable @inline(__always) @@ -7516,6 +7345,15 @@ public static func fakeQuantWithMinMaxVarsGradient( /// then de-quantized and output as floats in `[min; max]` interval. /// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. /// +/// Before quantization, `min` and `max` values are adjusted with the following +/// logic. +/// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, +/// the behavior can be unexpected: +/// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. +/// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. +/// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, +/// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. +/// /// This operation has a gradient and thus allows for training `min` and `max` /// values. 
@inlinable @inline(__always) @@ -7624,39 +7462,6 @@ public static func fiveFloatOutputs( return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4)) } -/// A Reader that outputs fixed-length records from a file. -/// -/// - Attrs: -/// - header_bytes: Number of bytes in the header, defaults to 0. -/// - record_bytes: Number of bytes in the record. -/// - footer_bytes: Number of bytes in the footer, defaults to 0. -/// - hop_bytes: Number of bytes to hop before each read. Default of 0 means using -/// record_bytes. -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. -/// -/// - Output reader_handle: The handle to reference the Reader. -@inlinable @inline(__always) -public static func fixedLengthRecordReader( - headerBytes: Int64 = 0, - recordBytes: Int64, - footerBytes: Int64 = 0, - hopBytes: Int64 = 0, - container: String, - sharedName: String -) -> StringTensor { - let ret: TensorHandle = #tfop("FixedLengthRecordReader", - header_bytes: headerBytes, - record_bytes: recordBytes, - footer_bytes: footerBytes, - hop_bytes: hopBytes, - container: container, - shared_name: sharedName) - return StringTensor(handle: ret) -} - /// Generates labels for candidate sampling with a learned unigram distribution. /// /// A unigram sampler could use a fixed unigram distribution read from a @@ -8140,7 +7945,7 @@ public static func fusedBatchNorm( mean: Tensor, variance: Tensor, epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, + dataFormat: DataFormat = .b'nhwc', isTraining: Bool = true ) -> (y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor, reserveSpace2: Tensor) { let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("FusedBatchNorm", @@ -8198,7 +8003,7 @@ public static func fusedBatchNormGrad( reserveSpace1: Tensor, reserveSpace2: Tensor, epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, + dataFormat: DataFormat = .b'nhwc', isTraining: Bool = true ) -> (xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor, reserveSpace3: Tensor, reserveSpace4: Tensor) { let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("FusedBatchNormGrad", @@ -8257,7 +8062,7 @@ public static func fusedBatchNormGradV2, reserveSpace2: Tensor, epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, + dataFormat: DataFormat = .b'nhwc', isTraining: Bool = true ) -> (xBackprop: Tensor, scaleBackprop: Tensor, offsetBackprop: Tensor, reserveSpace3: Tensor, reserveSpace4: Tensor) { let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("FusedBatchNormGradV2", @@ -8314,7 +8119,7 @@ public static func fusedBatchNormV2, variance: Tensor, epsilon: Double = 0.0001, - dataFormat: DataFormat = .nhwc, + dataFormat: DataFormat = .b'nhwc', isTraining: Bool = true ) -> (y: Tensor, batchMean: Tensor, batchVariance: Tensor, reserveSpace1: Tensor, reserveSpace2: Tensor) { let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("FusedBatchNormV2", @@ -8361,7 +8166,7 @@ public static func fusedPadConv2D( _ input: Tensor, paddings: Tensor, filter: Tensor, - mode: Mode4, + mode: Mode5, strides: [Int32], padding: Padding ) -> Tensor { @@ -8411,7 +8216,7 @@ public static func 
fusedResizeAndPadConv2D( paddings: Tensor, filter: Tensor, resizeAlignCorners: Bool = false, - mode: Mode4, + mode: Mode5, strides: [Int32], padding: Padding ) -> Tensor { @@ -9093,40 +8898,6 @@ public static func hSVToRGB( return Tensor(handle: ret) } -/// Creates a non-initialized hash table. -/// -/// This op creates a hash table, specifying the type of its keys and values. -/// Before using the table you will have to initialize it. After initialization the -/// table will be immutable. -/// -/// - Attrs: -/// - container: If non-empty, this table is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this table is shared under the given name across -/// multiple sessions. -/// - use_node_name_sharing: If true and shared_name is empty, the table is shared -/// using the node name. -/// - key_dtype: Type of the table keys. -/// - value_dtype: Type of the table values. -/// -/// - Output table_handle: Handle to a table. -@inlinable @inline(__always) -public static func hashTable( - container: String, - sharedName: String, - useNodeNameSharing: Bool = false, - typeKeyDtype: KeyDtype.Type, - typeValueDtype: ValueDtype.Type -) -> StringTensor { - let ret: TensorHandle = #tfop("HashTable", - key_dtype$dtype: KeyDtype.tensorFlowDataType, - value_dtype$dtype: ValueDtype.tensorFlowDataType, - container: container, - shared_name: sharedName, - use_node_name_sharing: useNodeNameSharing) - return StringTensor(handle: ret) -} - /// Return histogram of values. /// /// Given the tensor `values`, this operation returns a rank 1 histogram counting @@ -9273,29 +9044,6 @@ public static func identity( return Tensor(handle: ret) } -/// A Reader that outputs the queued work as both the key and value. -/// -/// To use, enqueue strings in a Queue. ReaderRead will take the front -/// work string and output (work, work). -/// -/// - Attrs: -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. -/// -/// - Output reader_handle: The handle to reference the Reader. -@inlinable @inline(__always) -public static func identityReader( - container: String, - sharedName: String -) -> StringTensor { - let ret: TensorHandle = #tfop("IdentityReader", - container: container, - shared_name: sharedName) - return StringTensor(handle: ret) -} - /// Compute the lower regularized incomplete Gamma function `P(a, x)`. /// /// The lower regularized incomplete Gamma function is defined as: @@ -9472,67 +9220,6 @@ public static func inTopKV2( return Tensor(handle: ret) } -/// Table initializer that takes two tensors for keys and values respectively. -/// -/// - Parameters: -/// - table_handle: Handle to a table which will be initialized. -/// - keys: Keys of type Tkey. -/// - values: Values of type Tval. -@inlinable @inline(__always) -public static func initializeTable( - tableHandle: StringTensor, - keys: Tensor, - _ values: Tensor -) { - return #tfop("InitializeTable", - tableHandle, - keys, - values, - Tkey$dtype: Tkey.tensorFlowDataType, - Tval$dtype: Tval.tensorFlowDataType) -} - -/// Initializes a table from a text file. -/// -/// It inserts one key-value pair into the table for each line of the file. -/// The key and value is extracted from the whole line content, elements from the -/// split line based on `delimiter` or the line number (starting from zero). 
-/// Where to extract the key and value from a line is specified by `key_index` and -/// `value_index`. -/// -/// - A value of -1 means use the line number(starting from zero), expects `int64`. -/// - A value of -2 means use the whole line content, expects `string`. -/// - A value >= 0 means use the index (starting at zero) of the split line based -/// on `delimiter`. -/// -/// - Parameters: -/// - table_handle: Handle to a table which will be initialized. -/// - filename: Filename of a vocabulary text file. -/// -/// - Attrs: -/// - key_index: Column index in a line to get the table `key` values from. -/// - value_index: Column index that represents information of a line to get the table -/// `value` values from. -/// - vocab_size: Number of elements of the file, use -1 if unknown. -/// - delimiter: Delimiter to separate fields in a line. -@inlinable @inline(__always) -public static func initializeTableFromTextFile( - tableHandle: StringTensor, - filename: StringTensor, - keyIndex: Int64, - valueIndex: Int64, - vocabSize: Int64 = -1, - delimiter: String = "\t" -) { - return #tfop("InitializeTableFromTextFile", - tableHandle, - filename, - key_index: keyIndex, - value_index: valueIndex, - vocab_size: vocabSize, - delimiter: delimiter) -} - /// Adds v into specified rows of x. /// /// Computes y = x; y[i, :] += v; return y. @@ -9781,20 +9468,27 @@ public static func isNan( return Tensor(handle: ret) } -/// Checks whether a tensor has been initialized. +/// Returns the index of a data point that should be added to the seed set. /// -/// Outputs boolean scalar indicating whether the tensor has been initialized. +/// Entries in distances are assumed to be squared distances of candidate points to +/// the already sampled centers in the seed set. The op constructs one Markov chain +/// of the k-MC^2 algorithm and returns the index of one candidate point to be added +/// as an additional cluster center. /// -/// - Parameter ref: Should be from a `Variable` node. May be uninitialized. +/// - Parameters: +/// - distances: Vector with squared distances to the closest previously sampled cluster center +/// for each candidate point. +/// - seed: Scalar. Seed for initializing the random number generator. /// -/// - Attr dtype: The type of elements in the variable tensor. +/// - Output index: Scalar with the index of the sampled point. @inlinable @inline(__always) -public static func isVariableInitialized( - ref: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("IsVariableInitialized", - ref, - dtype$dtype: Dtype.tensorFlowDataType) +public static func kMC2ChainInitialization( + distances: Tensor, + seed: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("KMC2ChainInitialization", + distances, + seed) return Tensor(handle: ret) } @@ -9814,6 +9508,38 @@ public static func kernelLabelRequired( return StringTensor(handle: ret) } +/// Selects num_to_sample rows of input using the KMeans++ criterion. +/// +/// Rows of points are assumed to be input points. One row is selected at random. +/// Subsequent rows are sampled with probability proportional to the squared L2 +/// distance from the nearest row selected thus far till num_to_sample rows have +/// been sampled. +/// +/// - Parameters: +/// - points: Matrix of shape (n, d). Rows are assumed to be input points. +/// - num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n. +/// - seed: Scalar. Seed for initializing the random number generator. +/// - num_retries_per_sample: Scalar. 
For each row that is sampled, this parameter +/// specifies the number of additional points to draw from the current +/// distribution before selecting the best. If a negative value is specified, a +/// heuristic is used to sample O(log(num_to_sample)) additional points. +/// +/// - Output samples: Matrix of shape (num_to_sample, d). The sampled rows. +@inlinable @inline(__always) +public static func kmeansPlusPlusInitialization( + points: Tensor, + numToSample: Tensor, + seed: Tensor, + numRetriesPerSample: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("KmeansPlusPlusInitialization", + points, + numToSample, + seed, + numRetriesPerSample) + return Tensor(handle: ret) +} + /// L2 Loss. /// /// Computes half the L2 norm of a tensor without the `sqrt`: @@ -9833,26 +9559,6 @@ public static func l2Loss( return Tensor(handle: ret) } -/// A Reader that outputs the records from a LMDB file. -/// -/// - Attrs: -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. -/// -/// - Output reader_handle: The handle to reference the Reader. -@inlinable @inline(__always) -public static func lMDBReader( - container: String, - sharedName: String -) -> StringTensor { - let ret: TensorHandle = #tfop("LMDBReader", - container: container, - shared_name: sharedName) - return StringTensor(handle: ret) -} - /// Local Response Normalization. /// /// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last @@ -10080,6 +9786,41 @@ public static func lSTMBlockCellGrad( return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4)) } +/// Computes rectified linear: `max(features, features * alpha)`. +@inlinable @inline(__always) +public static func leakyRelu( + features: Tensor, + alpha: Double = 0.2 +) -> Tensor { + let ret: TensorHandle = #tfop("LeakyRelu", + features, + T$dtype: T.tensorFlowDataType, + alpha: alpha) + return Tensor(handle: ret) +} + +/// Computes rectified linear gradients for a LeakyRelu operation. +/// +/// - Parameters: +/// - gradients: The backpropagated gradients to the corresponding LeakyRelu operation. +/// - features: The features passed as input to the corresponding LeakyRelu operation, +/// OR the outputs of that operation (both work equivalently). +/// +/// - Output backprops: `gradients * (features > 0) + alpha * gradients * (featurs <= 0)`. +@inlinable @inline(__always) +public static func leakyReluGrad( + gradients: Tensor, + features: Tensor, + alpha: Double = 0.2 +) -> Tensor { + let ret: TensorHandle = #tfop("LeakyReluGrad", + gradients, + features, + T$dtype: T.tensorFlowDataType, + alpha: alpha) + return Tensor(handle: ret) +} + /// Generates labels for candidate sampling with a learned unigram distribution. /// /// See explanations of candidate sampling and the data formats at @@ -10367,281 +10108,720 @@ public static func loadAndRemapMatrix( return Tensor(handle: ret) } -/// Computes natural logarithm of x element-wise. +/// Load ADAM embedding parameters. /// -/// I.e., \\(y = \log_e x\\). +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. 
For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. +/// +/// - Parameters: +/// - parameters: Value of parameters used in the ADAM optimization algorithm. +/// - momenta: Value of momenta used in the ADAM optimization algorithm. +/// - velocities: Value of velocities used in the ADAM optimization algorithm. @inlinable @inline(__always) -public static func log( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Log", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) +public static func loadTPUEmbeddingADAMParameters( + parameters: Tensor, + momenta: Tensor, + velocities: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingADAMParameters", + parameters, + momenta, + velocities, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Computes natural logarithm of (1 + x) element-wise. +/// Load ADAM embedding parameters with debug support. /// -/// I.e., \\(y = \log_e (1 + x)\\). +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. +/// +/// - Parameters: +/// - parameters: Value of parameters used in the ADAM optimization algorithm. +/// - momenta: Value of momenta used in the ADAM optimization algorithm. +/// - velocities: Value of velocities used in the ADAM optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the ADAM optimization algorithm. @inlinable @inline(__always) -public static func log1p( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("Log1p", - x, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) +public static func loadTPUEmbeddingADAMParametersGradAccumDebug( + parameters: Tensor, + momenta: Tensor, + velocities: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingADAMParametersGradAccumDebug", + parameters, + momenta, + velocities, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Computes the sign and the log of the absolute value of the determinant of -/// -/// one or more square matrices. -/// -/// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions -/// form square matrices. The outputs are two tensors containing the signs and -/// absolute values of the log determinants for all N input submatrices -/// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). -/// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU -/// is the LU decomposition of the input and P is the corresponding -/// permutation matrix. +/// Load Adadelta embedding parameters. /// -/// - Parameter input: Shape is `[N, M, M]`. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Outputs: -/// - sign: The signs of the log determinants of the inputs. 
Shape is `[N]`. -/// - log_abs_determinant: The logs of the absolute values of the determinants -/// of the N input matrices. Shape is `[N]`. +/// - Parameters: +/// - parameters: Value of parameters used in the Adadelta optimization algorithm. +/// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. +/// - updates: Value of updates used in the Adadelta optimization algorithm. @inlinable @inline(__always) -public static func logMatrixDeterminant( - _ input: Tensor -) -> (sign: Tensor, logAbsDeterminant: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("LogMatrixDeterminant", - input, - T$dtype: T.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +public static func loadTPUEmbeddingAdadeltaParameters( + parameters: Tensor, + accumulators: Tensor, + updates: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingAdadeltaParameters", + parameters, + accumulators, + updates, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Computes log softmax activations. +/// Load Adadelta parameters with debug support. /// -/// For each batch `i` and class `j` we have +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) +/// - Parameters: +/// - parameters: Value of parameters used in the Adadelta optimization algorithm. +/// - accumulators: Value of accumulators used in the Adadelta optimization algorithm. +/// - updates: Value of updates used in the Adadelta optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the Adadelta optimization algorithm. +@inlinable @inline(__always) +public static func loadTPUEmbeddingAdadeltaParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + updates: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", + parameters, + accumulators, + updates, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) +} + +/// Load Adagrad embedding parameters. /// -/// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Output logsoftmax: Same shape as `logits`. +/// - Parameters: +/// - parameters: Value of parameters used in the Adagrad optimization algorithm. +/// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. 
@inlinable @inline(__always) -public static func logSoftmax( - logits: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("LogSoftmax", - logits, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) +public static func loadTPUEmbeddingAdagradParameters( + parameters: Tensor, + accumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingAdagradParameters", + parameters, + accumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Generates labels for candidate sampling with a log-uniform distribution. +/// Load Adagrad embedding parameters with debug support. /// -/// See explanations of candidate sampling and the data formats at -/// go/candidate-sampling. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// For each batch, this op picks a single set of sampled candidate labels. +/// - Parameters: +/// - parameters: Value of parameters used in the Adagrad optimization algorithm. +/// - accumulators: Value of accumulators used in the Adagrad optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm. +@inlinable @inline(__always) +public static func loadTPUEmbeddingAdagradParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingAdagradParametersGradAccumDebug", + parameters, + accumulators, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) +} + +/// Load centered RMSProp embedding parameters. /// -/// The advantages of sampling candidates per-batch are simplicity and the -/// possibility of efficient dense matrix multiplication. The disadvantage is that -/// the sampled candidates must be chosen independently of the context and of the -/// true labels. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the -/// IDs of the num_true target_classes in the corresponding original label. +/// - Parameters: +/// - parameters: Value of parameters used in the centered RMSProp optimization algorithm. +/// - ms: Value of ms used in the centered RMSProp optimization algorithm. +/// - mom: Value of mom used in the centered RMSProp optimization algorithm. +/// - mg: Value of mg used in the centered RMSProp optimization algorithm. 
+@inlinable @inline(__always) +public static func loadTPUEmbeddingCenteredRMSPropParameters( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + mg: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingCenteredRMSPropParameters", + parameters, + ms, + mom, + mg, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) +} + +/// Load FTRL embedding parameters. /// -/// - Attrs: -/// - num_true: Number of true labels per context. -/// - num_sampled: Number of candidates to randomly sample. -/// - unique: If unique is true, we sample with rejection, so that all sampled -/// candidates in a batch are unique. This requires some approximation to -/// estimate the post-rejection sampling probabilities. -/// - range_max: The sampler will sample integers from the interval [0, range_max). -/// - seed: If either seed or seed2 are set to be non-zero, the random number -/// generator is seeded by the given seed. Otherwise, it is seeded by a -/// random seed. -/// - seed2: An second seed to avoid seed collision. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Outputs: -/// - sampled_candidates: A vector of length num_sampled, in which each element is -/// the ID of a sampled candidate. -/// - true_expected_count: A batch_size * num_true matrix, representing -/// the number of times each candidate is expected to occur in a batch -/// of sampled candidates. If unique=true, then this is a probability. -/// - sampled_expected_count: A vector of length num_sampled, for each sampled -/// candidate representing the number of times the candidate is expected -/// to occur in a batch of sampled candidates. If unique=true, then this is a -/// probability. +/// - Parameters: +/// - parameters: Value of parameters used in the FTRL optimization algorithm. +/// - accumulators: Value of accumulators used in the FTRL optimization algorithm. +/// - linears: Value of linears used in the FTRL optimization algorithm. @inlinable @inline(__always) -public static func logUniformCandidateSampler( - trueClasses: Tensor, - numTrue: Int64, - numSampled: Int64, - unique: Bool, - rangeMax: Int64, - seed: Int64 = 0, - seed2: Int64 = 0 -) -> (sampledCandidates: Tensor, trueExpectedCount: Tensor, sampledExpectedCount: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("LogUniformCandidateSampler", - trueClasses, - num_true: numTrue, - num_sampled: numSampled, - unique: unique, - range_max: rangeMax, - seed: seed, - seed2: seed2) - return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +public static func loadTPUEmbeddingFTRLParameters( + parameters: Tensor, + accumulators: Tensor, + linears: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingFTRLParameters", + parameters, + accumulators, + linears, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Returns the truth value of x AND y element-wise. +/// Load FTRL embedding parameters with debug support. /// -/// *NOTE*: `LogicalAnd` supports broadcasting. 
More about broadcasting -/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. +/// +/// - Parameters: +/// - parameters: Value of parameters used in the FTRL optimization algorithm. +/// - accumulators: Value of accumulators used in the FTRL optimization algorithm. +/// - linears: Value of linears used in the FTRL optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the FTRL optimization algorithm. @inlinable @inline(__always) -public static func logicalAnd( - _ x: Tensor, - _ y: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("LogicalAnd", - x, - y) - return Tensor(handle: ret) +public static func loadTPUEmbeddingFTRLParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + linears: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingFTRLParametersGradAccumDebug", + parameters, + accumulators, + linears, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Returns the truth value of NOT x element-wise. +/// Load MDL Adagrad Light embedding parameters. +/// +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. +/// +/// - Parameters: +/// - parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm. +/// - accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm. +/// - weights: Value of weights used in the MDL Adagrad Light optimization algorithm. +/// - benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm. @inlinable @inline(__always) -public static func logicalNot( - _ x: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("LogicalNot", - x) - return Tensor(handle: ret) +public static func loadTPUEmbeddingMDLAdagradLightParameters( + parameters: Tensor, + accumulators: Tensor, + weights: Tensor, + benefits: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingMDLAdagradLightParameters", + parameters, + accumulators, + weights, + benefits, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Returns the truth value of x OR y element-wise. +/// Load Momentum embedding parameters. /// -/// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting -/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. +/// +/// - Parameters: +/// - parameters: Value of parameters used in the Momentum optimization algorithm. 
+/// - momenta: Value of momenta used in the Momentum optimization algorithm. @inlinable @inline(__always) -public static func logicalOr( - _ x: Tensor, - _ y: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("LogicalOr", - x, - y) - return Tensor(handle: ret) +public static func loadTPUEmbeddingMomentumParameters( + parameters: Tensor, + momenta: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingMomentumParameters", + parameters, + momenta, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Outputs all keys and values in the table. +/// Load Momentum embedding parameters with debug support. /// -/// - Parameter table_handle: Handle to the table. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Outputs: -/// - keys: Vector of all keys present in the table. -/// - values: Tensor of all values in the table. Indexed in parallel with `keys`. -@inlinable @inline(__always) -public static func lookupTableExport( - tableHandle: StringTensor -) -> (keys: Tensor, values: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("LookupTableExport", - tableHandle, - Tkeys$dtype: Tkeys.tensorFlowDataType, - Tvalues$dtype: Tvalues.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +/// - Parameters: +/// - parameters: Value of parameters used in the Momentum optimization algorithm. +/// - momenta: Value of momenta used in the Momentum optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the Momentum optimization algorithm. +@inlinable @inline(__always) +public static func loadTPUEmbeddingMomentumParametersGradAccumDebug( + parameters: Tensor, + momenta: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingMomentumParametersGradAccumDebug", + parameters, + momenta, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Looks up keys in a table, outputs the corresponding values. +/// Load proximal Adagrad embedding parameters. /// -/// The tensor `keys` must of the same type as the keys of the table. -/// The output `values` is of the type of the table values. -/// -/// The scalar `default_value` is the value output for keys not present in the -/// table. It must also be of the same type as the table values. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// /// - Parameters: -/// - table_handle: Handle to the table. -/// - keys: Any shape. Keys to look up. +/// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. +/// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. 
+@inlinable @inline(__always) +public static func loadTPUEmbeddingProximalAdagradParameters( + parameters: Tensor, + accumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingProximalAdagradParameters", + parameters, + accumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) +} + +/// Load proximal Adagrad embedding parameters with debug support. +/// +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Output values: Same shape as `keys`. Values found in the table, or `default_values` -/// for missing keys. +/// - Parameters: +/// - parameters: Value of parameters used in the proximal Adagrad optimization algorithm. +/// - accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the proximal Adagrad optimization algorithm. @inlinable @inline(__always) -public static func lookupTableFind( - tableHandle: StringTensor, - keys: Tensor, - defaultValue: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("LookupTableFind", - tableHandle, - keys, - defaultValue, - Tin$dtype: Tin.tensorFlowDataType, - Tout$dtype: Tout.tensorFlowDataType) - return Tensor(handle: ret) +public static func loadTPUEmbeddingProximalAdagradParametersGradAccumDebug( + parameters: Tensor, + accumulators: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", + parameters, + accumulators, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Replaces the contents of the table with the specified keys and values. +/// Load RMSProp embedding parameters. /// -/// The tensor `keys` must be of the same type as the keys of the table. -/// The tensor `values` must be of the type of the table values. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// /// - Parameters: -/// - table_handle: Handle to the table. -/// - keys: Any shape. Keys to look up. -/// - values: Values to associate with keys. +/// - parameters: Value of parameters used in the RMSProp optimization algorithm. +/// - ms: Value of ms used in the RMSProp optimization algorithm. +/// - mom: Value of mom used in the RMSProp optimization algorithm. 
@inlinable @inline(__always) -public static func lookupTableImport( - tableHandle: StringTensor, - keys: Tensor, - _ values: Tensor +public static func loadTPUEmbeddingRMSPropParameters( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 ) { - return #tfop("LookupTableImport", - tableHandle, - keys, - values, - Tin$dtype: Tin.tensorFlowDataType, - Tout$dtype: Tout.tensorFlowDataType) + return #tfop("LoadTPUEmbeddingRMSPropParameters", + parameters, + ms, + mom, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Updates the table to associates keys with values. +/// Load RMSProp embedding parameters with debug support. /// -/// The tensor `keys` must be of the same type as the keys of the table. -/// The tensor `values` must be of the type of the table values. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// /// - Parameters: -/// - table_handle: Handle to the table. -/// - keys: Any shape. Keys to look up. -/// - values: Values to associate with keys. +/// - parameters: Value of parameters used in the RMSProp optimization algorithm. +/// - ms: Value of ms used in the RMSProp optimization algorithm. +/// - mom: Value of mom used in the RMSProp optimization algorithm. +/// - gradient_accumulators: Value of gradient_accumulators used in the RMSProp optimization algorithm. @inlinable @inline(__always) -public static func lookupTableInsert( - tableHandle: StringTensor, - keys: Tensor, - _ values: Tensor +public static func loadTPUEmbeddingRMSPropParametersGradAccumDebug( + parameters: Tensor, + ms: Tensor, + mom: Tensor, + gradientAccumulators: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 ) { - return #tfop("LookupTableInsert", - tableHandle, - keys, - values, - Tin$dtype: Tin.tensorFlowDataType, - Tout$dtype: Tout.tensorFlowDataType) + return #tfop("LoadTPUEmbeddingRMSPropParametersGradAccumDebug", + parameters, + ms, + mom, + gradientAccumulators, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) } -/// Computes the number of elements in the given table. +/// Load SGD embedding parameters. /// -/// - Parameter table_handle: Handle to the table. +/// An op that loads optimization parameters into HBM for embedding. Must be +/// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct +/// embedding table configuration. For example, this op is used to install +/// parameters that are loaded from a checkpoint before a training loop is +/// executed. /// -/// - Output size: Scalar that contains number of elements in the table. +/// - Parameter parameters: Value of parameters used in the stochastic gradient descent optimization algorithm. 
@inlinable @inline(__always) -public static func lookupTableSize( - tableHandle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("LookupTableSize", - tableHandle) +public static func loadTPUEmbeddingStochasticGradientDescentParameters( + parameters: Tensor, + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) { + return #tfop("LoadTPUEmbeddingStochasticGradientDescentParameters", + parameters, + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) +} + +/// Computes natural logarithm of x element-wise. +/// +/// I.e., \\(y = \log_e x\\). +@inlinable @inline(__always) +public static func log( + _ x: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("Log", + x, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + +/// Computes natural logarithm of (1 + x) element-wise. +/// +/// I.e., \\(y = \log_e (1 + x)\\). +@inlinable @inline(__always) +public static func log1p( + _ x: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("Log1p", + x, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + +/// Computes the sign and the log of the absolute value of the determinant of +/// +/// one or more square matrices. +/// +/// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions +/// form square matrices. The outputs are two tensors containing the signs and +/// absolute values of the log determinants for all N input submatrices +/// `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). +/// The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU +/// is the LU decomposition of the input and P is the corresponding +/// permutation matrix. +/// +/// - Parameter input: Shape is `[N, M, M]`. +/// +/// - Outputs: +/// - sign: The signs of the log determinants of the inputs. Shape is `[N]`. +/// - log_abs_determinant: The logs of the absolute values of the determinants +/// of the N input matrices. Shape is `[N]`. +@inlinable @inline(__always) +public static func logMatrixDeterminant( + _ input: Tensor +) -> (sign: Tensor, logAbsDeterminant: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("LogMatrixDeterminant", + input, + T$dtype: T.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Computes log softmax activations. +/// +/// For each batch `i` and class `j` we have +/// +/// logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) +/// +/// - Parameter logits: 2-D with shape `[batch_size, num_classes]`. +/// +/// - Output logsoftmax: Same shape as `logits`. +@inlinable @inline(__always) +public static func logSoftmax( + logits: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("LogSoftmax", + logits, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + +/// Generates labels for candidate sampling with a log-uniform distribution. +/// +/// See explanations of candidate sampling and the data formats at +/// go/candidate-sampling. +/// +/// For each batch, this op picks a single set of sampled candidate labels. +/// +/// The advantages of sampling candidates per-batch are simplicity and the +/// possibility of efficient dense matrix multiplication. The disadvantage is that +/// the sampled candidates must be chosen independently of the context and of the +/// true labels. +/// +/// - Parameter true_classes: A batch_size * num_true matrix, in which each row contains the +/// IDs of the num_true target_classes in the corresponding original label. 
+/// +/// - Attrs: +/// - num_true: Number of true labels per context. +/// - num_sampled: Number of candidates to randomly sample. +/// - unique: If unique is true, we sample with rejection, so that all sampled +/// candidates in a batch are unique. This requires some approximation to +/// estimate the post-rejection sampling probabilities. +/// - range_max: The sampler will sample integers from the interval [0, range_max). +/// - seed: If either seed or seed2 are set to be non-zero, the random number +/// generator is seeded by the given seed. Otherwise, it is seeded by a +/// random seed. +/// - seed2: An second seed to avoid seed collision. +/// +/// - Outputs: +/// - sampled_candidates: A vector of length num_sampled, in which each element is +/// the ID of a sampled candidate. +/// - true_expected_count: A batch_size * num_true matrix, representing +/// the number of times each candidate is expected to occur in a batch +/// of sampled candidates. If unique=true, then this is a probability. +/// - sampled_expected_count: A vector of length num_sampled, for each sampled +/// candidate representing the number of times the candidate is expected +/// to occur in a batch of sampled candidates. If unique=true, then this is a +/// probability. +@inlinable @inline(__always) +public static func logUniformCandidateSampler( + trueClasses: Tensor, + numTrue: Int64, + numSampled: Int64, + unique: Bool, + rangeMax: Int64, + seed: Int64 = 0, + seed2: Int64 = 0 +) -> (sampledCandidates: Tensor, trueExpectedCount: Tensor, sampledExpectedCount: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("LogUniformCandidateSampler", + trueClasses, + num_true: numTrue, + num_sampled: numSampled, + unique: unique, + range_max: rangeMax, + seed: seed, + seed2: seed2) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Returns the truth value of x AND y element-wise. +/// +/// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting +/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +@inlinable @inline(__always) +public static func logicalAnd( + _ x: Tensor, + _ y: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("LogicalAnd", + x, + y) + return Tensor(handle: ret) +} + +/// Returns the truth value of NOT x element-wise. +@inlinable @inline(__always) +public static func logicalNot( + _ x: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("LogicalNot", + x) + return Tensor(handle: ret) +} + +/// Returns the truth value of x OR y element-wise. +/// +/// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting +/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +@inlinable @inline(__always) +public static func logicalOr( + _ x: Tensor, + _ y: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("LogicalOr", + x, + y) return Tensor(handle: ret) } @@ -10703,12 +10883,58 @@ public static func lowerBound( - capacity: Int64 = 0, - memoryLimit: Int64 = 0, - container: String, +/// Computes the LU decomposition of one or more square matrices. +/// +/// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions +/// form square matrices. +/// +/// The input has to be invertible. +/// +/// The output consists of two tensors LU and P containing the LU decomposition +/// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and +/// upper triangular factors. 
+/// +/// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of +/// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower +/// triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose +/// entries correspond to the upper triangular part, including the diagonal, of LU. +/// +/// P represents a permutation matrix encoded as a list of indices each between `0` +/// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to +/// P, then the L, U and P satisfies P_mat * input = L * U. +/// +/// - Parameter input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of +/// size `[M, M]`. +/// +/// - Outputs: +/// - lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the +/// lower triangular factor `L` with unit diagonal, and whose upper triangular part +/// denotes the upper triangular factor `U`. +/// - p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is +/// `[..., M]`. +/// @compatibility(scipy) +/// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are +/// packed into a single tensor, the permutation is applied to `input` instead of +/// the right hand side and the permutation `P` is returned as a list of indices +/// instead of a permutation matrix. +/// @end_compatibility +@inlinable @inline(__always) +public static func lu( + _ input: Tensor +) -> (lu: Tensor, p: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("Lu", + input, + T$dtype: T.tensorFlowDataType, + output_idx_type$dtype: OutputIdxType.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Op removes all elements in the underlying container. +@inlinable @inline(__always) +public static func mapClear( + capacity: Int64 = 0, + memoryLimit: Int64 = 0, + container: String, sharedName: String, typeDtypes: Dtypes.Type ) { @@ -11210,6 +11436,41 @@ public static func matrixSolveLs( return Tensor(handle: ret) } +/// Computes the matrix square root of one or more square matrices: +/// +/// matmul(sqrtm(A), sqrtm(A)) = A +/// +/// The input matrix should be invertible. If the input matrix is real, it should +/// have no eigenvalues which are real and negative (pairs of complex conjugate +/// eigenvalues are allowed). +/// +/// The matrix square root is computed by first reducing the matrix to +/// quasi-triangular form with the real Schur decomposition. The square root +/// of the quasi-triangular matrix is then computed directly. Details of +/// the algorithm can be found in: Nicholas J. Higham, "Computing real +/// square roots of a real matrix", Linear Algebra Appl., 1987. +/// +/// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions +/// form square matrices. The output is a tensor of the same shape as the input +/// containing the matrix square root for all input submatrices `[..., :, :]`. +/// +/// - Parameter input: Shape is `[..., M, M]`. +/// +/// - Output output: Shape is `[..., M, M]`. +/// +/// @compatibility(scipy) +/// Equivalent to scipy.linalg.sqrtm +/// @end_compatibility +@inlinable @inline(__always) +public static func matrixSquareRoot( + _ input: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("MatrixSquareRoot", + input, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + /// Solves systems of linear equations with upper or lower triangular matrices by /// /// backsubstitution. 
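
// A minimal usage sketch for the `lu` wrapper added above, not part of the
// generated bindings: it assumes a Swift for TensorFlow toolchain with this
// `Raw` namespace in scope, and pins the element and pivot-index types to
// Float and Int32 purely for illustration.
import TensorFlow

let squareMatrix = Tensor<Float>(shape: [2, 2], scalars: [4, 3, 6, 3])
// `lu` packs L (unit diagonal, strictly lower part) and U (diagonal and upper
// part) into a single tensor and returns the row permutation as indices, so
// that P_mat * input = L * U as described in the doc comment above.
let factorization: (lu: Tensor<Float>, p: Tensor<Int32>) = Raw.lu(squareMatrix)
print(factorization.lu.shape) // [2, 2]
print(factorization.p.shape)  // [2]
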
@@ -11311,7 +11572,7 @@ public static func maxPool(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat3 = .nhwc
+  dataFormat: DataFormat4 = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPool",
    input,
@@ -11346,7 +11607,7 @@ public static func maxPool3D(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat1 = .ndhwc
+  dataFormat: DataFormat1 = .ndhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPool3D",
    input,
@@ -11384,7 +11645,7 @@ public static func maxPool3DGrad(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
  dataFormat: DataFormat1 = .ndhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPool3DGrad",
    origInput,
@@ -11427,7 +11688,7 @@ public static func maxPool3DGradGrad(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat1 = .ndhwc
+  dataFormat: DataFormat1 = .ndhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPool3DGradGrad",
    origInput,
@@ -11468,7 +11729,7 @@ public static func maxPoolGrad(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat = .nhwc
+  dataFormat: DataFormat = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGrad",
    origInput,
@@ -11509,7 +11770,7 @@ public static func maxPoolGradGrad(
  ksize: [Int32],
  strides: [Int32],
  padding: Padding,
-  dataFormat: DataFormat = .nhwc
+  dataFormat: DataFormat = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGradGrad",
    origInput,
@@ -11550,7 +11811,7 @@ public static func maxPoolGradGradV2(
  ksize: Tensor,
  strides: Tensor,
  padding: Padding,
-  dataFormat: DataFormat = .nhwc
+  dataFormat: DataFormat = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGradGradV2",
    origInput,
@@ -11577,6 +11838,7 @@ public static func maxPoolGradGradV2(
/// - strides: The stride of the sliding window for each dimension of the
/// input tensor.
/// - padding: The type of padding algorithm to use.
+/// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`.
///
/// - Output output: Gradients of gradients w.r.t. the input of `max_pool`.
@inlinable @inline(__always)
@@ -11586,7 +11848,8 @@ public static func maxPoolGradGradWithArgmax(
  argmax: Tensor,
  ksize: [Int32],
  strides: [Int32],
-  padding: Padding
+  padding: Padding,
+  includeBatchInIndex: Bool = false
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGradGradWithArgmax",
    input,
@@ -11596,7 +11859,8 @@ public static func maxPoolGradGradWithArgmax(
  ksize: Tensor,
  strides: Tensor,
  padding: Padding,
-  dataFormat: DataFormat = .nhwc
+  dataFormat: DataFormat = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGradV2",
    origInput,
@@ -11654,6 +11918,7 @@ public static func maxPoolGradV2(
/// - strides: The stride of the sliding window for each dimension of the
/// input tensor.
/// - padding: The type of padding algorithm to use.
+/// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`.
///
/// - Output output: Gradients w.r.t. the input of `max_pool`.
@inlinable @inline(__always)
@@ -11663,7 +11928,8 @@ public static func maxPoolGradWithArgmax(
  argmax: Tensor,
  ksize: [Int32],
  strides: [Int32],
-  padding: Padding
+  padding: Padding,
+  includeBatchInIndex: Bool = false
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolGradWithArgmax",
    input,
@@ -11673,7 +11939,8 @@ public static func maxPoolGradWithArgmax(
  ksize: Tensor,
  strides: Tensor,
  padding: Padding,
-  dataFormat: DataFormat3 = .nhwc
+  dataFormat: DataFormat4 = .nhwc
) -> Tensor {
  let ret: TensorHandle = #tfop("MaxPoolV2",
    input,
@@ -11715,8 +11982,9 @@ public static func maxPoolV2(
/// Performs max pooling on the input and outputs both max values and indices.
///
/// The indices in `argmax` are flattened, so that a maximum value at position
-/// `[b, y, x, c]` becomes flattened index
-/// `((b * height + y) * width + x) * channels + c`.
+/// `[b, y, x, c]` becomes flattened index:
+/// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
+/// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
///
/// The indices returned are always in `[0, height) x [0, width)` before flattening,
/// even if padding is involved and the mathematically correct answer is outside
@@ -11730,6 +11998,7 @@ public static func maxPoolV2(
/// - strides: The stride of the sliding window for each dimension of the
/// input tensor.
/// - padding: The type of padding algorithm to use.
+/// - include_batch_in_index: Whether to include batch dimension in flattened index of `argmax`.
///
/// - Outputs:
/// - output: The max pooled output tensor.
@@ -11739,7 +12008,8 @@ public static func maxPoolWithArgmax(
  _ input: Tensor,
  ksize: [Int32],
  strides: [Int32],
-  padding: Padding
+  padding: Padding,
+  includeBatchInIndex: Bool = false
) -> (output: Tensor, argmax: Tensor) {
  let ret: (TensorHandle, TensorHandle) = #tfop("MaxPoolWithArgmax",
    input,
@@ -11747,7 +12017,8 @@ public static func maxPoolWithArgmax(
public static func mirrorPad(
  _ input: Tensor,
  paddings: Tensor,
-  mode: Mode4
+  mode: Mode5
) -> Tensor {
  let ret: TensorHandle = #tfop("MirrorPad",
    input,
@@ -12046,7 +12317,7 @@ public static func mirrorPad(
  _ input: Tensor,
  paddings: Tensor,
-  mode: Mode4
+  mode: Mode5
) -> Tensor {
  let ret: TensorHandle = #tfop("MirrorPadGrad",
    input,
@@ -12092,6 +12363,22 @@ public static func mul(
  return Tensor(handle: ret)
}

+/// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
+///
+/// *NOTE*: `Mul` supports broadcasting. More about broadcasting
+/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+@inlinable @inline(__always)
+public static func mulNoNan(
+  _ x: Tensor,
+  _ y: Tensor
+) -> Tensor {
+  let ret: TensorHandle = #tfop("MulNoNan",
+    x,
+    y,
+    T$dtype: T.tensorFlowDataType)
+  return Tensor(handle: ret)
+}
+
/// Draws samples from a multinomial distribution.
/// /// - Parameters: @@ -12123,40 +12410,6 @@ public static func multinomial( - container: String, - sharedName: String, - useNodeNameSharing: Bool = false, - typeKeyDtype: KeyDtype.Type, - typeValueDtype: ValueDtype.Type -) -> StringTensor { - let ret: TensorHandle = #tfop("MutableHashTable", - key_dtype$dtype: KeyDtype.tensorFlowDataType, - value_dtype$dtype: ValueDtype.tensorFlowDataType, - container: container, - shared_name: sharedName, - use_node_name_sharing: useNodeNameSharing) - return StringTensor(handle: ret) -} - @inlinable @inline(__always) public static func nInPolymorphicTwice( _ a: [Tensor], @@ -12216,6 +12469,88 @@ public static func nPolymorphicRestrictIn( T$dtype: T.tensorFlowDataType) } +/// Outputs a tensor containing the reduction across all input tensors. +/// +/// Outputs a tensor containing the reduction across all input tensors passed to ops +/// within the same `shared_name. +/// +/// The graph should be constructed so if one op runs with shared_name value `c`, +/// then `num_devices` ops will run with shared_name value `c`. Failure to do so +/// will cause the graph execution to fail to complete. +/// +/// input: the input to the reduction +/// data: the value of the reduction across all `num_devices` devices. +/// reduction: the reduction operation to perform. +/// num_devices: The number of devices participating in this reduction. +/// shared_name: Identifier that shared between ops of the same reduction. +@inlinable @inline(__always) +public static func ncclAllReduce( + _ input: Tensor, + reduction: Reduction, + numDevices: Int64, + sharedName: String +) -> Tensor { + let ret: TensorHandle = #tfop("NcclAllReduce", + input, + T$dtype: T.tensorFlowDataType, + reduction: reduction.cName, + num_devices: numDevices, + shared_name: sharedName) + return Tensor(handle: ret) +} + +/// Reduces `input` from `num_devices` using `reduction` to a single device. +/// +/// Reduces `input` from `num_devices` using `reduction` to a single device. +/// +/// The graph should be constructed so that all inputs have a valid device +/// assignment, and the op itself is assigned one of these devices. +/// +/// input: The input to the reduction. +/// data: the value of the reduction across all `num_devices` devices. +/// reduction: the reduction operation to perform. +@inlinable @inline(__always) +public static func ncclReduce( + _ input: [Tensor], + reduction: Reduction +) -> Tensor { + let ret: TensorHandle = #tfop("NcclReduce", + input, + T$dtype: T.tensorFlowDataType, + reduction: reduction.cName) + return Tensor(handle: ret) +} + +/// Selects the k nearest centers for each point. +/// +/// Rows of points are assumed to be input points. Rows of centers are assumed to be +/// the list of candidate centers. For each point, the k centers that have least L2 +/// distance to it are computed. +/// +/// - Parameters: +/// - points: Matrix of shape (n, d). Rows are assumed to be input points. +/// - centers: Matrix of shape (m, d). Rows are assumed to be centers. +/// - k: Number of nearest centers to return for each point. If k is larger than m, then +/// only m centers are returned. +/// +/// - Outputs: +/// - nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers +/// closest to the corresponding point, ordered by increasing distance. +/// - nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the +/// corresponding center in nearest_center_indices. 
+@inlinable @inline(__always) +public static func nearestNeighbors( + points: Tensor, + centers: Tensor, + k: Tensor +) -> (nearestCenterIndices: Tensor, nearestCenterDistances: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("NearestNeighbors", + points, + centers, + k) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + /// Computes numerical negative value element-wise. /// /// I.e., \\(y = -x\\). @@ -12229,35 +12564,25 @@ public static func neg( return Tensor(handle: ret) } -/// Training via negative sampling. +/// Returns the next representable value of `x1` in the direction of `x2`, element-wise. /// -/// - Parameters: -/// - w_in: input word embedding. -/// - w_out: output word embedding. -/// - examples: A vector of word ids. -/// - labels: A vector of word ids. +/// This operation returns the same result as the C++ std::nextafter function. /// -/// - Attrs: -/// - vocab_count: Count of words in the vocabulary. -/// - num_negative_samples: Number of negative samples per example. -@inlinable @inline(__always) -public static func negTrain( - wIn: Tensor, - wOut: Tensor, - examples: Tensor, - labels: Tensor, - lr: Tensor, - vocabCount: [Int32], - numNegativeSamples: Int64 -) { - return #tfop("NegTrain", - wIn, - wOut, - examples, - labels, - lr, - vocab_count: vocabCount, - num_negative_samples: numNegativeSamples) +/// It can also return a subnormal number. +/// +/// @compatibility(cpp) +/// Equivalent to C++ std::nextafter function. +/// @end_compatibility +@inlinable @inline(__always) +public static func nextAfter( + x1: Tensor, + x2: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("NextAfter", + x1, + x2, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) } /// Makes its input available to the next iteration. @@ -12282,6 +12607,26 @@ public static func noOp( return #tfop("NoOp") } +/// Non-deterministically generates some integers. +/// +/// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results. +/// +/// - Parameter shape: The shape of the output tensor. +/// +/// - Attr dtype: The type of the output. +/// +/// - Output output: Non-deterministic integer values with specified shape. +@inlinable @inline(__always) +public static func nonDeterministicInts( + shape: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("NonDeterministicInts", + shape, + dtype$dtype: Dtype.tensorFlowDataType, + shape_dtype$dtype: ShapeDtype.tensorFlowDataType) + return Tensor(handle: ret) +} + /// Greedily selects a subset of bounding boxes in descending order of score, /// /// pruning away boxes that have high intersection-over-union (IOU) overlap @@ -12837,6 +13182,30 @@ public static func outT( return Tensor(handle: ret) } +/// Enqueue a Tensor on the computation outfeed. +/// +/// - Parameter input: A tensor that will be inserted into the outfeed queue. +@inlinable @inline(__always) +public static func outfeedEnqueue( + _ input: Tensor +) { + return #tfop("OutfeedEnqueue", + input, + dtype$dtype: Dtype.tensorFlowDataType) +} + +/// Enqueue multiple Tensor values on the computation outfeed. +/// +/// - Parameter inputs: A list of tensors that will be inserted into the outfeed queue as an +/// XLA tuple. +@inlinable @inline(__always) +public static func outfeedEnqueueTuple( + inputs: [Tensor] +) { + return #tfop("OutfeedEnqueueTuple", + inputs) +} + /// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. 
///
/// Packs the `N` tensors in `values` into a tensor with rank one higher than each
@@ -13099,9 +13468,10 @@ public static func parseTensor(
/// The polygamma function is defined as:
///
///
-/// \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)
+/// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
///
/// where \\(\psi(x)\\) is the digamma function.
+/// The polygamma function is defined only for non-negative integer orders \\(a\\).
@inlinable @inline(__always)
public static func polygamma(
  _ a: Tensor,
@@ -13250,11 +13620,11 @@ public static func print(
@inlinable @inline(__always)
public static func printV2(
  _ input: StringTensor,
-  outputStream: OutputStream = .stderr
+  outputStream: String = "stderr"
) {
  return #tfop("PrintV2",
    input,
-    output_stream: outputStream.cName)
+    output_stream: outputStream)
}

/// Computes the product of elements across dimensions of a tensor.
@@ -13345,6 +13715,60 @@ public static func quantizeAndDequantize(
  return Tensor(handle: ret)
}

+/// Quantizes then dequantizes a tensor.
+///
+/// This op simulates the precision loss from the quantized forward pass by:
+///
+/// 1. Quantizing the tensor to fixed point numbers, which should match the target
+/// quantization method when it is used in inference.
+/// 2. Dequantizing it back to floating point numbers for the following ops, most
+/// likely matmul.
+///
+/// There are different ways to quantize. This version uses only scaling, so 0.0
+/// maps to 0.
+///
+/// From the specified 'num_bits' in the quantized output type, it determines
+/// minimum and maximum representable quantized values.
+///
+/// e.g.
+///
+/// * [-128, 127] for signed, num_bits = 8, or
+/// * [0, 255] for unsigned, num_bits = 8.
+///
+/// If range_given == False, the initial input_min, input_max will be determined
+/// automatically as the minimum and maximum values in the input tensor, otherwise
+/// the specified values of input_min, input_max are used.
+///
+/// Note: If the input_min, input_max are specified, they do not need to equal the
+/// actual minimum and maximum values in the tensor. e.g. in some cases it may be
+/// beneficial to specify these values such that the low probability extremes of the
+/// input distribution are clipped.
+///
+/// This op determines the maximum scale_factor that would map the initial
+/// [input_min, input_max] range to a range that lies within the representable
+/// quantized range.
+///
+/// It determines the scale from one of input_min and input_max, then updates the
+/// other one to maximize the representable range.
+///
+/// e.g.
+///
+/// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+/// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it
+/// would update input_max to be 127 / 12.8 = 9.921875
+/// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+/// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it
+/// would update input_min to be 128.0 / 12.7 = -10.07874
+/// * if the output is unsigned, input_min is forced to be 0, and only the
+/// specified input_max is used.
+///
+/// After determining the scale_factor and updating the input range, it applies the
+/// following to each value in the 'input' tensor.
+///
+/// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
+///
+/// The above round function rounds the value based on the given round_mode.
+///
///
/// - Parameters:
/// - input: Tensor to quantize and then dequantize.
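
// A small, self-contained sketch (illustrative only; the names below are ad hoc
// and not part of the generated API) that reproduces the signed, num_bits = 8
// scale-factor arithmetic from the doc comment above for the range [-10.0, 5.0].
let inputMin: Float = -10.0
let inputMax: Float = 5.0
let numBits = 8
let quantMin = Float(-(1 << (numBits - 1)))     // -128
let quantMax = Float((1 << (numBits - 1)) - 1)  //  127
// The scale comes from whichever endpoint limits the range; the other endpoint
// is then widened so the full quantized range stays usable.
let scaleFactor = min(quantMin / inputMin, quantMax / inputMax) // min(12.8, 25.4) = 12.8
let adjustedInputMax = quantMax / scaleFactor                   // 127 / 12.8 = 9.921875
print(scaleFactor, adjustedInputMax)
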
@@ -13360,6 +13784,14 @@ public static func quantizeAndDequantize(
/// have been called `signed_output`)
/// - num_bits: The bitwidth of the quantization.
/// - range_given: Whether the range is given or should be determined from the `input` tensor.
+/// - round_mode: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
+/// used when rounding float values to their quantized equivalents. The following
+/// rounding modes are currently supported:
+///
+/// * HALF_TO_EVEN: this is the default round_mode.
+/// * HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
+/// rounds up to -7.
+///
@inlinable @inline(__always)
public static func quantizeAndDequantizeV2(
  _ input: Tensor,
@@ -13367,7 +13799,8 @@ public static func quantizeAndDequantizeV2(
  inputMax: Tensor,
  signedInput: Bool = true,
  numBits: Int64 = 8,
-  rangeGiven: Bool = false
+  rangeGiven: Bool = false,
+  roundMode: RoundMode = .halfToEven
) -> Tensor {
  let ret: TensorHandle = #tfop("QuantizeAndDequantizeV2",
    input,
@@ -13376,7 +13809,8 @@ public static func quantizeAndDequantizeV2(
    T$dtype: T.tensorFlowDataType,
    signed_input: signedInput,
    num_bits: numBits,
-    range_given: rangeGiven)
+    range_given: rangeGiven,
+    round_mode: roundMode.cName)
  return Tensor(handle: ret)
}

@@ -13569,8 +14003,8 @@ public static func quantizeV2(
  _ input: Tensor,
  minRange: Tensor,
  maxRange: Tensor,
-  mode: Mode = .minCombined,
-  roundMode: RoundMode = .halfAwayFromZero
+  mode: Mode = .minCombined,
+  roundMode: RoundMode6 = .halfAwayFromZero
) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) {
  let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizeV2",
    input,
@@ -13689,168 +14123,575 @@ public static func quantizedAvgPool(
/// - scale_after_normalization: A bool indicating whether the resulted tensor
/// needs to be multiplied with gamma.
@inlinable @inline(__always) -public static func quantizedBatchNormWithGlobalNormalization( - t: Tensor, - tMin: Tensor, - tMax: Tensor, - m: Tensor, - mMin: Tensor, - mMax: Tensor, - v: Tensor, - vMin: Tensor, - vMax: Tensor, - beta: Tensor, - betaMin: Tensor, - betaMax: Tensor, - gamma: Tensor, - gammaMin: Tensor, - gammaMax: Tensor, - varianceEpsilon: Double, - scaleAfterNormalization: Bool -) -> (result: Tensor, resultMin: Tensor, resultMax: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedBatchNormWithGlobalNormalization", - t, - tMin, - tMax, - m, - mMin, - mMax, - v, - vMin, - vMax, - beta, - betaMin, - betaMax, - gamma, - gammaMin, - gammaMax, +public static func quantizedBatchNormWithGlobalNormalization( + t: Tensor, + tMin: Tensor, + tMax: Tensor, + m: Tensor, + mMin: Tensor, + mMax: Tensor, + v: Tensor, + vMin: Tensor, + vMax: Tensor, + beta: Tensor, + betaMin: Tensor, + betaMax: Tensor, + gamma: Tensor, + gammaMin: Tensor, + gammaMax: Tensor, + varianceEpsilon: Double, + scaleAfterNormalization: Bool +) -> (result: Tensor, resultMin: Tensor, resultMax: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedBatchNormWithGlobalNormalization", + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + Tinput$dtype: Tinput.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + variance_epsilon: varianceEpsilon, + scale_after_normalization: scaleAfterNormalization) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Adds Tensor 'bias' to Tensor 'input' for Quantized types. +/// +/// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. +/// +/// - Parameters: +/// - bias: A 1D bias Tensor with size matching the last dimension of 'input'. +/// - min_input: The float value that the lowest quantized input value represents. +/// - max_input: The float value that the highest quantized input value represents. +/// - min_bias: The float value that the lowest quantized bias value represents. +/// - max_bias: The float value that the highest quantized bias value represents. +/// +/// - Outputs: +/// - min_out: The float value that the lowest quantized output value represents. +/// - max_out: The float value that the highest quantized output value represents. +@inlinable @inline(__always) +public static func quantizedBiasAdd( + _ input: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minBias: Tensor, + maxBias: Tensor +) -> (output: Tensor, minOut: Tensor, maxOut: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedBiasAdd", + input, + bias, + minInput, + maxInput, + minBias, + maxBias, + T1$dtype: T1.tensorFlowDataType, + T2$dtype: T2.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Concatenates quantized tensors along one dimension. +/// +/// - Parameters: +/// - concat_dim: 0-D. The dimension along which to concatenate. Must be in the +/// range [0, rank(values)). +/// - values: The `N` Tensors to concatenate. Their ranks and types must match, +/// and their sizes must match in all dimensions except `concat_dim`. +/// - input_mins: The minimum scalar values for each of the input tensors. +/// - input_maxes: The maximum scalar values for each of the input tensors. 
+/// +/// - Outputs: +/// - output: A `Tensor` with the concatenation of values stacked along the +/// `concat_dim` dimension. This tensor's shape matches that of `values` except +/// in `concat_dim` where it has the sum of the sizes. +/// - output_min: The float value that the minimum quantized output value represents. +/// - output_max: The float value that the maximum quantized output value represents. +@inlinable @inline(__always) +public static func quantizedConcat( + concatDim: Tensor, + _ values: [Tensor], + inputMins: [Tensor], + inputMaxes: [Tensor] +) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConcat", + concatDim, + values, + inputMins, + inputMaxes, + T$dtype: T.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Computes a 2D convolution given quantized 4D input and filter tensors. +/// +/// The inputs are quantized tensors where the lowest value represents the real +/// number of the associated minimum, and the highest represents the maximum. +/// This means that you can only interpret the quantized output in the same way, by +/// taking the returned minimum and maximum values into account. +/// +/// - Parameters: +/// - filter: filter's input_depth dimension must match input's depth dimensions. +/// - min_input: The float value that the lowest quantized input value represents. +/// - max_input: The float value that the highest quantized input value represents. +/// - min_filter: The float value that the lowest quantized filter value represents. +/// - max_filter: The float value that the highest quantized filter value represents. +/// +/// - Attrs: +/// - strides: The stride of the sliding window for each dimension of the input +/// tensor. +/// - padding: The type of padding algorithm to use. +/// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of +/// `input`. If set to k > 1, there will be k-1 skipped cells between each +/// filter element on that dimension. The dimension order is determined by the +/// value of `data_format`, see above for details. Dilations in the batch and +/// depth dimensions must be 1. +/// +/// - Outputs: +/// - min_output: The float value that the lowest quantized output value represents. +/// - max_output: The float value that the highest quantized output value represents. 
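A minimal sketch of invoking the quantized convolution documented above and declared just below. The `UInt8`/`Int32` element types, the shapes, and the explicit result annotation are assumptions about how the generic parameters resolve; `import TensorFlow` is assumed to provide this `Raw` namespace.

```swift
import TensorFlow

// Hypothetical 1x4x4x1 input and 2x2x1x1 filter, both quantized to UInt8
// over the real range [-1, 1].
let input = Tensor<UInt8>(repeating: 128, shape: [1, 4, 4, 1])
let filter = Tensor<UInt8>(repeating: 255, shape: [2, 2, 1, 1])

// The annotation pins the generic output element type.
let (output, minOutput, maxOutput): (Tensor<Int32>, Tensor<Float>, Tensor<Float>) =
  Raw.quantizedConv2D(
    input,
    filter: filter,
    minInput: Tensor<Float>(-1),
    maxInput: Tensor<Float>(1),
    minFilter: Tensor<Float>(-1),
    maxFilter: Tensor<Float>(1),
    strides: [1, 1, 1, 1],
    padding: .same)
// minOutput/maxOutput are the real values represented by the lowest and
// highest possible quantized output values.
```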
+@inlinable @inline(__always) +public static func quantizedConv2D( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2D", + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DAndRelu( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DAndRelu", + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DAndReluAndRequantize( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DAndReluAndRequantize", + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + minFreezedOutput, + maxFreezedOutput, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DAndRequantize( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DAndRequantize", + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + minFreezedOutput, + maxFreezedOutput, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Computes QuantizedConv2D per channel. 
+/// +/// - Parameters: +/// - input: The original input tensor. +/// - filter: The original filter tensor. +/// - min_input: The minimum value of the input tensor +/// - max_input: The maximum value of the input tensor. +/// - min_filter: The minimum value of the filter tensor. +/// - max_filter: The maximum value of the filter tensor. +/// +/// - Attrs: +/// - Tinput: The quantized type of input tensor that needs to be converted. +/// - Tfilter: The quantized type of filter tensor that needs to be converted. +/// - out_type: The quantized type of output tensor that needs to be converted. +/// - strides: list of stride values. +/// - dilations: list of dilation values. +/// +/// - Outputs: +/// - output: The output tensor. +/// - min_output: The minimum value of the final output tensor. +/// - max_output: The maximum value of the final output tensor. +@inlinable @inline(__always) +public static func quantizedConv2DPerChannel( + _ input: Tensor, + filter: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DPerChannel", + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DWithBias( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBias", + input, + filter, + bias, + minInput, + maxInput, + minFilter, + maxFilter, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DWithBiasAndRelu( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasAndRelu", + input, + filter, + bias, + minInput, + maxInput, + minFilter, + maxFilter, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DWithBiasAndReluAndRequantize( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: 
Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasAndReluAndRequantize", + input, + filter, + bias, + minInput, + maxInput, + minFilter, + maxFilter, + minFreezedOutput, + maxFreezedOutput, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + Tbias$dtype: Tbias.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +@inlinable @inline(__always) +public static func quantizedConv2DWithBiasAndRequantize( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasAndRequantize", + input, + filter, + bias, + minInput, + maxInput, + minFilter, + maxFilter, + minFreezedOutput, + maxFreezedOutput, Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + Tbias$dtype: Tbias.tensorFlowDataType, out_type$dtype: OutType.tensorFlowDataType, - variance_epsilon: varianceEpsilon, - scale_after_normalization: scaleAfterNormalization) + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } -/// Adds Tensor 'bias' to Tensor 'input' for Quantized types. -/// -/// Broadcasts the values of bias on dimensions 0..N-2 of 'input'. -/// -/// - Parameters: -/// - bias: A 1D bias Tensor with size matching the last dimension of 'input'. -/// - min_input: The float value that the lowest quantized input value represents. -/// - max_input: The float value that the highest quantized input value represents. -/// - min_bias: The float value that the lowest quantized bias value represents. -/// - max_bias: The float value that the highest quantized bias value represents. -/// -/// - Outputs: -/// - min_out: The float value that the lowest quantized output value represents. -/// - max_out: The float value that the highest quantized output value represents. 
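The `QuantizedBiasAdd` wrapper keeps its shape in this diff (its declaration moves earlier in the file); a hedged usage sketch, with illustrative element types and quantization ranges:

```swift
import TensorFlow

// A 2x3 quantized activation tensor and a size-3 bias, each with the
// real-valued range it was quantized against.
let acts = Tensor<UInt8>(repeating: 200, shape: [2, 3])
let bias = Tensor<UInt8>([10, 20, 30])

let (out, minOut, maxOut): (Tensor<Int32>, Tensor<Float>, Tensor<Float>) =
  Raw.quantizedBiasAdd(
    acts,
    bias: bias,
    minInput: Tensor<Float>(0),
    maxInput: Tensor<Float>(6),
    minBias: Tensor<Float>(-1),
    maxBias: Tensor<Float>(1))
// The bias is broadcast over dimensions 0..N-2 of `acts`; minOut/maxOut
// describe the range of the widened output.
```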
@inlinable @inline(__always) -public static func quantizedBiasAdd( - _ input: Tensor, - bias: Tensor, +public static func quantizedConv2DWithBiasSignedSumAndReluAndRequantize( + _ input: Tensor, + filter: Tensor, + bias: Tensor, minInput: Tensor, maxInput: Tensor, - minBias: Tensor, - maxBias: Tensor -) -> (output: Tensor, minOut: Tensor, maxOut: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedBiasAdd", + minFilter: Tensor, + maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + summand: Tensor, + minSummand: Tensor, + maxSummand: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasSignedSumAndReluAndRequantize", input, + filter, bias, minInput, maxInput, - minBias, - maxBias, - T1$dtype: T1.tensorFlowDataType, - T2$dtype: T2.tensorFlowDataType, - out_type$dtype: OutType.tensorFlowDataType) + minFilter, + maxFilter, + minFreezedOutput, + maxFreezedOutput, + summand, + minSummand, + maxSummand, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + Tbias$dtype: Tbias.tensorFlowDataType, + Tsummand$dtype: Tsummand.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } -/// Concatenates quantized tensors along one dimension. -/// -/// - Parameters: -/// - concat_dim: 0-D. The dimension along which to concatenate. Must be in the -/// range [0, rank(values)). -/// - values: The `N` Tensors to concatenate. Their ranks and types must match, -/// and their sizes must match in all dimensions except `concat_dim`. -/// - input_mins: The minimum scalar values for each of the input tensors. -/// - input_maxes: The maximum scalar values for each of the input tensors. -/// -/// - Outputs: -/// - output: A `Tensor` with the concatenation of values stacked along the -/// `concat_dim` dimension. This tensor's shape matches that of `values` except -/// in `concat_dim` where it has the sum of the sizes. -/// - output_min: The float value that the minimum quantized output value represents. -/// - output_max: The float value that the maximum quantized output value represents. 
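Similarly for `QuantizedConcat`, whose declaration also moves earlier in this diff; a sketch assuming both inputs share the same real-valued range and that `UInt8` stands in for the quantized element type:

```swift
import TensorFlow

// Two 2x2 quantized tensors that were both quantized over [0, 1].
let a = Tensor<UInt8>(shape: [2, 2], scalars: [0, 128, 255, 64])
let b = Tensor<UInt8>(shape: [2, 2], scalars: [1, 2, 3, 4])

let (joined, outputMin, outputMax) = Raw.quantizedConcat(
  concatDim: Tensor<Int32>(0),                       // stack along rows
  [a, b],
  inputMins: [Tensor<Float>(0), Tensor<Float>(0)],
  inputMaxes: [Tensor<Float>(1), Tensor<Float>(1)])
// `joined` has shape [4, 2]; outputMin/outputMax cover the combined range.
```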
@inlinable @inline(__always) -public static func quantizedConcat( - concatDim: Tensor, - _ values: [Tensor], - inputMins: [Tensor], - inputMaxes: [Tensor] -) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConcat", - concatDim, - values, - inputMins, - inputMaxes, - T$dtype: T.tensorFlowDataType) +public static func quantizedConv2DWithBiasSumAndRelu( + _ input: Tensor, + filter: Tensor, + bias: Tensor, + minInput: Tensor, + maxInput: Tensor, + minFilter: Tensor, + maxFilter: Tensor, + summand: Tensor, + strides: [Int32], + padding: Padding, + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] +) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasSumAndRelu", + input, + filter, + bias, + minInput, + maxInput, + minFilter, + maxFilter, + summand, + Tinput$dtype: Tinput.tensorFlowDataType, + Tfilter$dtype: Tfilter.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType, + strides: strides, + padding: padding.cName, + dilations: dilations, + padding_list: paddingList) return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } -/// Computes a 2D convolution given quantized 4D input and filter tensors. -/// -/// The inputs are quantized tensors where the lowest value represents the real -/// number of the associated minimum, and the highest represents the maximum. -/// This means that you can only interpret the quantized output in the same way, by -/// taking the returned minimum and maximum values into account. -/// -/// - Parameters: -/// - filter: filter's input_depth dimension must match input's depth dimensions. -/// - min_input: The float value that the lowest quantized input value represents. -/// - max_input: The float value that the highest quantized input value represents. -/// - min_filter: The float value that the lowest quantized filter value represents. -/// - max_filter: The float value that the highest quantized filter value represents. -/// -/// - Attrs: -/// - strides: The stride of the sliding window for each dimension of the input -/// tensor. -/// - padding: The type of padding algorithm to use. -/// - dilations: 1-D tensor of length 4. The dilation factor for each dimension of -/// `input`. If set to k > 1, there will be k-1 skipped cells between each -/// filter element on that dimension. The dimension order is determined by the -/// value of `data_format`, see above for details. Dilations in the batch and -/// depth dimensions must be 1. -/// -/// - Outputs: -/// - min_output: The float value that the lowest quantized output value represents. -/// - max_output: The float value that the highest quantized output value represents. 
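The min/max pairs these quantized ops return are all read the same way: the lowest representable quantized value stands for the minimum real value and the highest for the maximum. A sketch of that mapping, assuming an unsigned 8-bit scheme (signed schemes shift the zero point):

```swift
// Recovering a real value from a quantized one using the returned range.
func dequantize(_ q: UInt8, min: Float, max: Float) -> Float {
  let scale = (max - min) / Float(UInt8.max)
  return min + Float(q) * scale
}

let real = dequantize(128, min: -1, max: 1)   // ~0.0039, just above the midpoint
```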
@inlinable @inline(__always) -public static func quantizedConv2D( +public static func quantizedConv2DWithBiasSumAndReluAndRequantize( _ input: Tensor, filter: Tensor, + bias: Tensor, minInput: Tensor, maxInput: Tensor, minFilter: Tensor, maxFilter: Tensor, + minFreezedOutput: Tensor, + maxFreezedOutput: Tensor, + summand: Tensor, + minSummand: Tensor, + maxSummand: Tensor, strides: [Int32], padding: Padding, - dilations: [Int32] = [1, 1, 1, 1] + dilations: [Int32] = [1, 1, 1, 1], + paddingList: [Int32] ) -> (output: Tensor, minOutput: Tensor, maxOutput: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2D", + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedConv2DWithBiasSumAndReluAndRequantize", input, filter, + bias, minInput, maxInput, minFilter, maxFilter, + minFreezedOutput, + maxFreezedOutput, + summand, + minSummand, + maxSummand, Tinput$dtype: Tinput.tensorFlowDataType, Tfilter$dtype: Tfilter.tensorFlowDataType, + Tbias$dtype: Tbias.tensorFlowDataType, + Tsummand$dtype: Tsummand.tensorFlowDataType, out_type$dtype: OutType.tensorFlowDataType, strides: strides, padding: padding.cName, - dilations: dilations) + dilations: dilations, + padding_list: paddingList) return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } @@ -14149,7 +14990,8 @@ public static func quantizedResizeBilinear( size: Tensor, min: Tensor, max: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> (resizedImages: Tensor, outMin: Tensor, outMax: Tensor) { let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("QuantizedResizeBilinear", images, @@ -14157,141 +14999,105 @@ public static func quantizedResizeBilinear( min, max, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } -/// Closes the given queue. -/// -/// This operation signals that no more elements will be enqueued in the -/// given queue. Subsequent Enqueue(Many) operations will fail. -/// Subsequent Dequeue(Many) operations will continue to succeed if -/// sufficient elements remain in the queue. Subsequent Dequeue(Many) -/// operations that would block will fail immediately. -/// -/// - Parameter handle: The handle to a queue. -/// -/// - Attr cancel_pending_enqueues: If true, all pending enqueue requests that are -/// blocked on the given queue will be canceled. -@inlinable @inline(__always) -public static func queueClose( - handle: StringTensor, - cancelPendingEnqueues: Bool = false -) { - return #tfop("QueueClose", - handle, - cancel_pending_enqueues: cancelPendingEnqueues) -} - -/// Enqueues a tuple of one or more tensors in the given queue. +/// Converts one or more images from RGB to HSV. /// -/// The components input has k elements, which correspond to the components of -/// tuples stored in the given queue. +/// Outputs a tensor of the same shape as the `images` tensor, containing the HSV +/// value of the pixels. The output is only well defined if the value in `images` +/// are in `[0,1]`. /// -/// N.B. If the queue is full, this operation will block until the given -/// element has been enqueued (or 'timeout_ms' elapses, if specified). +/// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and +/// `output[..., 2]` contains value. All HSV values are in `[0,1]`. 
A hue of 0 +/// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. /// -/// - Parameters: -/// - handle: The handle to a queue. -/// - components: One or more tensors from which the enqueued tensors should be taken. +/// - Parameter images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. /// -/// - Attr timeout_ms: If the queue is full, this operation will block for up to -/// timeout_ms milliseconds. -/// Note: This option is not supported yet. +/// - Output output: `images` converted to HSV. @inlinable @inline(__always) -public static func queueEnqueue( - handle: StringTensor, - components: [Tensor], - timeoutMs: Int64 = -1 -) { - return #tfop("QueueEnqueue", - handle, - components, - timeout_ms: timeoutMs) +public static func rGBToHSV( + images: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("RGBToHSV", + images, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) } -/// Enqueues zero or more tuples of one or more tensors in the given queue. +/// Returns a `RaggedTensor` containing the specified sequences of numbers. /// -/// This operation slices each component tensor along the 0th dimension to -/// make multiple queue elements. All of the tuple components must have the -/// same size in the 0th dimension. -/// -/// The components input has k elements, which correspond to the components of -/// tuples stored in the given queue. -/// -/// N.B. If the queue is full, this operation will block until the given -/// elements have been enqueued (or 'timeout_ms' elapses, if specified). -/// -/// - Parameters: -/// - handle: The handle to a queue. -/// - components: One or more tensors from which the enqueued tensors should -/// be taken. /// -/// - Attr timeout_ms: If the queue is too full, this operation will block for up -/// to timeout_ms milliseconds. -/// Note: This option is not supported yet. -@inlinable @inline(__always) -public static func queueEnqueueMany( - handle: StringTensor, - components: [Tensor], - timeoutMs: Int64 = -1 -) { - return #tfop("QueueEnqueueMany", - handle, - components, - timeout_ms: timeoutMs) -} - -/// Returns true if queue is closed. +/// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and +/// `rt_nested_splits`, such that +/// `result[i] = range(starts[i], limits[i], deltas[i])`. /// -/// This operation returns true if the queue is closed and false if the queue -/// is open. +/// ```python +/// >>> (rt_nested_splits, rt_dense_values) = gen_ragged_ops.ragged_range( +/// ... starts=[2, 5, 8], limits=[3, 5, 12], deltas=1) +/// >>> result = ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits) +/// >>> print result.eval().tolist() +/// [[2], # result[0] = range(2, 3) +/// [], # result[1] = range(5, 5) +/// [8, 9, 10, 11]] # result[2] = range(8, 12) +/// ``` /// -/// - Parameter handle: The handle to a queue. -@inlinable @inline(__always) -public static func queueIsClosed( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("QueueIsClosed", - handle) - return Tensor(handle: ret) -} - -/// Computes the number of elements in the given queue. +/// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors. +/// The vector inputs must all have the same size. Scalar inputs are broadcast +/// to match the size of the vector inputs. /// -/// - Parameter handle: The handle to a queue. +/// - Parameters: +/// - starts: The starts of each range. +/// - limits: The limits of each range. +/// - deltas: The deltas of each range. 
/// -/// - Output size: The number of elements in the given queue. -@inlinable @inline(__always) -public static func queueSize( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("QueueSize", - handle) - return Tensor(handle: ret) +/// - Outputs: +/// - rt_nested_splits: The `row_splits` for the returned `RaggedTensor`. +/// - rt_dense_values: The `flat_values` for the returned `RaggedTensor`. +@inlinable @inline(__always) +public static func raggedRange( + starts: Tensor, + limits: Tensor, + deltas: Tensor +) -> (rtNestedSplits: Tensor, rtDenseValues: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("RaggedRange", + starts, + limits, + deltas, + T$dtype: T.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) } -/// Converts one or more images from RGB to HSV. +/// Converts a `RaggedTensor` into a `SparseTensor` with the same values. /// -/// Outputs a tensor of the same shape as the `images` tensor, containing the HSV -/// value of the pixels. The output is only well defined if the value in `images` -/// are in `[0,1]`. +/// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits) +/// output=SparseTensor(indices=sparse_indices, values=sparse_values, +/// dense_shape=sparse_dense_shape) /// -/// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and -/// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 -/// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. +/// - Parameters: +/// - rt_nested_splits: The `row_splits` for the `RaggedTensor`. +/// - rt_dense_values: The `flat_values` for the `RaggedTensor`. /// -/// - Parameter images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3. +/// - Attr RAGGED_RANK: The ragged rank of the input RaggedTensor. `rt_nested_splits` should contain +/// this number of ragged-splits tensors. This value should equal +/// `input.ragged_rank`. /// -/// - Output output: `images` converted to HSV. -@inlinable @inline(__always) -public static func rGBToHSV( - images: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("RGBToHSV", - images, +/// - Outputs: +/// - sparse_indices: The indices for the `SparseTensor`. +/// - sparse_values: The values of the `SparseTensor`. +/// - sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`. +@inlinable @inline(__always) +public static func raggedTensorToSparse( + rtNestedSplits: [Tensor], + rtDenseValues: Tensor +) -> (sparseIndices: Tensor, sparseValues: Tensor, sparseDenseShape: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RaggedTensorToSparse", + rtNestedSplits, + rtDenseValues, T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } /// Randomly crop `image`. @@ -14649,130 +15455,6 @@ public static func readFile( return StringTensor(handle: ret) } -/// Returns the number of records this Reader has produced. -/// -/// This is the same as the number of ReaderRead executions that have -/// succeeded. -/// -/// - Parameter reader_handle: Handle to a Reader. -@inlinable @inline(__always) -public static func readerNumRecordsProduced( - readerHandle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("ReaderNumRecordsProduced", - readerHandle) - return Tensor(handle: ret) -} - -/// Returns the number of work units this Reader has finished processing. -/// -/// - Parameter reader_handle: Handle to a Reader. 
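Returning to the `RaggedRange` wrapper added earlier in this hunk, a Swift sketch of the Python example quoted in its doc comment (`import TensorFlow` is assumed to supply the `Raw` namespace; the splits are integer row offsets into the flat values):

```swift
import TensorFlow

// Ranges [2, 3), [5, 5) and [8, 12), with the scalar delta broadcast
// across all three, per the doc comment above.
let (splits, values) = Raw.raggedRange(
  starts: Tensor<Int32>([2, 5, 8]),
  limits: Tensor<Int32>([3, 5, 12]),
  deltas: Tensor<Int32>(1))
// splits == [0, 1, 1, 5]
// values == [2, 8, 9, 10, 11]
```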
-@inlinable @inline(__always) -public static func readerNumWorkUnitsCompleted( - readerHandle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("ReaderNumWorkUnitsCompleted", - readerHandle) - return Tensor(handle: ret) -} - -/// Returns the next record (key, value pair) produced by a Reader. -/// -/// Will dequeue from the input queue if necessary (e.g. when the -/// Reader needs to start reading from a new file since it has finished -/// with the previous file). -/// -/// - Parameters: -/// - reader_handle: Handle to a Reader. -/// - queue_handle: Handle to a Queue, with string work items. -/// -/// - Outputs: -/// - key: A scalar. -/// - value: A scalar. -@inlinable @inline(__always) -public static func readerRead( - readerHandle: StringTensor, - queueHandle: StringTensor -) -> (key: StringTensor, value: StringTensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("ReaderRead", - readerHandle, - queueHandle) - return (StringTensor(handle: ret.0), StringTensor(handle: ret.1)) -} - -/// Returns up to `num_records` (key, value) pairs produced by a Reader. -/// -/// Will dequeue from the input queue if necessary (e.g. when the -/// Reader needs to start reading from a new file since it has finished -/// with the previous file). -/// It may return less than `num_records` even before the last batch. -/// -/// - Parameters: -/// - reader_handle: Handle to a `Reader`. -/// - queue_handle: Handle to a `Queue`, with string work items. -/// - num_records: number of records to read from `Reader`. -/// -/// - Outputs: -/// - keys: A 1-D tensor. -/// - values: A 1-D tensor. -@inlinable @inline(__always) -public static func readerReadUpTo( - readerHandle: StringTensor, - queueHandle: StringTensor, - numRecords: Tensor -) -> (keys: StringTensor, values: StringTensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("ReaderReadUpTo", - readerHandle, - queueHandle, - numRecords) - return (StringTensor(handle: ret.0), StringTensor(handle: ret.1)) -} - -/// Restore a Reader to its initial clean state. -/// -/// - Parameter reader_handle: Handle to a Reader. -@inlinable @inline(__always) -public static func readerReset( - readerHandle: StringTensor -) { - return #tfop("ReaderReset", - readerHandle) -} - -/// Restore a reader to a previously saved state. -/// -/// Not all Readers support being restored, so this can produce an -/// Unimplemented error. -/// -/// - Parameters: -/// - reader_handle: Handle to a Reader. -/// - state: Result of a ReaderSerializeState of a Reader with type -/// matching reader_handle. -@inlinable @inline(__always) -public static func readerRestoreState( - readerHandle: StringTensor, - state: StringTensor -) { - return #tfop("ReaderRestoreState", - readerHandle, - state) -} - -/// Produce a string tensor that encodes the state of a Reader. -/// -/// Not all Readers support being serialized, so this can produce an -/// Unimplemented error. -/// -/// - Parameter reader_handle: Handle to a Reader. -@inlinable @inline(__always) -public static func readerSerializeState( - readerHandle: StringTensor -) -> StringTensor { - let ret: TensorHandle = #tfop("ReaderSerializeState", - readerHandle) - return StringTensor(handle: ret) -} - /// Returns the real part of a complex number. /// /// Given a tensor `input` of complex numbers, this operation returns a tensor of @@ -14932,210 +15614,6 @@ public static func reduceJoin( return StringTensor(handle: ret) } -/// Creates or finds a child frame, and makes `data` available to the child frame. 
-/// -/// The unique `frame_name` is used by the `Executor` to identify frames. If -/// `is_constant` is true, `output` is a constant in the child frame; otherwise -/// it may be changed in the child frame. At most `parallel_iterations` iterations -/// are run in parallel in the child frame. -/// -/// - Parameter data: The tensor to be made available to the child frame. -/// -/// - Attrs: -/// - frame_name: The name of the child frame. -/// - is_constant: If true, the output is constant within the child frame. -/// - parallel_iterations: The number of iterations allowed to run in parallel. -/// -/// - Output output: The same tensor as `data`. -@inlinable @inline(__always) -public static func refEnter( - data: Tensor, - frameName: String, - isConstant: Bool = false, - parallelIterations: Int64 = 10 -) -> Tensor { - let ret: TensorHandle = #tfop("RefEnter", - data, - T$dtype: T.tensorFlowDataType, - frame_name: frameName, - is_constant: isConstant, - parallel_iterations: parallelIterations) - return Tensor(handle: ret) -} - -/// Exits the current frame to its parent frame. -/// -/// Exit makes its input `data` available to the parent frame. -/// -/// - Parameter data: The tensor to be made available to the parent frame. -/// -/// - Output output: The same tensor as `data`. -@inlinable @inline(__always) -public static func refExit( - data: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("RefExit", - data, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Return the same ref tensor as the input ref tensor. -@inlinable @inline(__always) -public static func refIdentity( - _ input: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("RefIdentity", - input, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func refIn( - _ a: Tensor -) { - return #tfop("RefIn", - a, - T$dtype: T.tensorFlowDataType) -} - -@inlinable @inline(__always) -public static func refInputFloatInput( - _ a: Tensor, - _ b: Tensor -) { - return #tfop("RefInputFloatInput", - a, - b) -} - -@inlinable @inline(__always) -public static func refInputFloatInputIntOutput( - _ a: Tensor, - _ b: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("RefInputFloatInputIntOutput", - a, - b) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func refInputIntInput( - _ a: Tensor, - _ b: Tensor -) { - return #tfop("RefInputIntInput", - a, - b) -} - -/// Forwards the value of an available tensor from `inputs` to `output`. -/// -/// `Merge` waits for at least one of the tensors in `inputs` to become available. -/// It is usually combined with `Switch` to implement branching. -/// -/// `Merge` forwards the first tensor for become available to `output`, and sets -/// `value_index` to its index in `inputs`. -/// -/// - Parameter inputs: The input tensors, exactly one of which will become available. -/// -/// - Outputs: -/// - output: Will be set to the available input tensor. -/// - value_index: The index of the chosen input tensor in `inputs`. -@inlinable @inline(__always) -public static func refMerge( - inputs: [Tensor] -) -> (output: Tensor, valueIndex: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("RefMerge", - inputs, - T$dtype: T.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - -/// Makes its input available to the next iteration. -/// -/// - Parameter data: The tensor to be made available to the next iteration. -/// -/// - Output output: The same tensor as `data`. 
-@inlinable @inline(__always) -public static func refNextIteration( - data: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("RefNextIteration", - data, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func refOut( -) -> Tensor { - let ret: TensorHandle = #tfop("RefOut", - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func refOutput( -) -> Tensor { - let ret: TensorHandle = #tfop("RefOutput") - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func refOutputFloatOutput( -) -> (a: Tensor, b: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("RefOutputFloatOutput") - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - -/// Forwards the `index`th element of `inputs` to `output`. -/// -/// - Parameters: -/// - index: A scalar that determines the input that gets selected. -/// - inputs: A list of ref tensors, one of which will be forwarded to `output`. -/// -/// - Output output: The forwarded tensor. -@inlinable @inline(__always) -public static func refSelect( - index: Tensor, - inputs: [Tensor] -) -> Tensor { - let ret: TensorHandle = #tfop("RefSelect", - index, - inputs, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Forwards the ref tensor `data` to the output port determined by `pred`. -/// -/// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, -/// the data goes to `output_false`. -/// -/// See also `Switch` and `Merge`. -/// -/// - Parameters: -/// - data: The ref tensor to be forwarded to the appropriate output. -/// - pred: A scalar that specifies which output port will receive data. -/// -/// - Outputs: -/// - output_false: If `pred` is false, data will be forwarded to this output. -/// - output_true: If `pred` is true, data will be forwarded to this output. -@inlinable @inline(__always) -public static func refSwitch( - data: Tensor, - pred: Tensor -) -> (outputFalse: Tensor, outputTrue: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("RefSwitch", - data, - pred, - T$dtype: T.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - /// Check if the input matches the regex pattern. /// /// The input is a string tensor of any shape. The pattern is a scalar @@ -15161,19 +15639,22 @@ public static func regexFullMatch( return Tensor(handle: ret) } -/// Replaces the match of pattern in input with rewrite. +/// Replaces matches of the `pattern` regular expression in `input` with the +/// replacement string provided in `rewrite`. /// /// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) /// /// - Parameters: /// - input: The text to be processed. -/// - pattern: The regular expression to match the input. -/// - rewrite: The rewrite to be applied to the matched expression. +/// - pattern: The regular expression to be matched in the `input` strings. +/// - rewrite: The rewrite string to be substituted for the `pattern` expression where it is +/// matched in the `input` strings. /// -/// - Attr replace_global: If True, the replacement is global, otherwise the replacement -/// is done only on the first match. +/// - Attr replace_global: If True, the replacement is global (that is, all matches of the `pattern` regular +/// expression in each input string are rewritten), otherwise the `rewrite` +/// substitution is only made for the first `pattern` match. 
/// -/// - Output output: The text after applying pattern and rewrite. +/// - Output output: The text after applying pattern match and rewrite substitution. @inlinable @inline(__always) public static func regexReplace( _ input: StringTensor, @@ -15272,13 +15753,44 @@ public static func reluGrad( public static func requantizationRange( _ input: Tensor, inputMin: Tensor, - inputMax: Tensor + inputMax: Tensor +) -> (outputMin: Tensor, outputMax: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("RequantizationRange", + input, + inputMin, + inputMax, + Tinput$dtype: Tinput.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Computes requantization range per channel. +/// +/// - Parameters: +/// - input: The original input tensor. +/// - input_min: The minimum value of the input tensor +/// - input_max: The maximum value of the input tensor. +/// +/// - Attrs: +/// - T: The quantized type of input tensor that needs to be converted. +/// - clip_value_max: The maximum value of the output that needs to be clipped. +/// Example: set this to 6 for Relu6. +/// +/// - Outputs: +/// - output_min: The minimum value of the final output tensor +/// - output_max: The maximum value of the final output tensor. +@inlinable @inline(__always) +public static func requantizationRangePerChannel( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor, + clipValueMax: Double ) -> (outputMin: Tensor, outputMax: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("RequantizationRange", + let ret: (TensorHandle, TensorHandle) = #tfop("RequantizationRangePerChannel", input, inputMin, inputMax, - Tinput$dtype: Tinput.tensorFlowDataType) + T$dtype: T.tensorFlowDataType, + clip_value_max: clipValueMax) return (Tensor(handle: ret.0), Tensor(handle: ret.1)) } @@ -15324,6 +15836,42 @@ public static func requantize( + _ input: Tensor, + inputMin: Tensor, + inputMax: Tensor, + requestedOutputMin: Tensor, + requestedOutputMax: Tensor +) -> (output: Tensor, outputMin: Tensor, outputMax: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RequantizePerChannel", + input, + inputMin, + inputMax, + requestedOutputMin, + requestedOutputMax, + T$dtype: T.tensorFlowDataType, + out_type$dtype: OutType.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + @inlinable @inline(__always) public static func requiresOlderGraphVersion( ) -> Tensor { @@ -15476,13 +16024,15 @@ public static func resizeArea( public static func resizeBicubic( images: Tensor, size: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeBicubic", images, size, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15503,13 +16053,15 @@ public static func resizeBicubic( public static func resizeBicubicGrad( grads: Tensor, originalImage: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeBicubicGrad", grads, originalImage, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15531,13 +16083,15 @@ public static func resizeBicubicGrad( public static func resizeBilinear( images: Tensor, size: Tensor, - alignCorners: Bool = false + 
alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeBilinear", images, size, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15558,13 +16112,15 @@ public static func resizeBilinear( public static func resizeBilinearGrad( grads: Tensor, originalImage: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeBilinearGrad", grads, originalImage, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15584,13 +16140,15 @@ public static func resizeBilinearGrad( public static func resizeNearestNeighbor( images: Tensor, size: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeNearestNeighbor", images, size, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15610,13 +16168,15 @@ public static func resizeNearestNeighbor( public static func resizeNearestNeighborGrad( grads: Tensor, size: Tensor, - alignCorners: Bool = false + alignCorners: Bool = false, + halfPixelCenters: Bool = false ) -> Tensor { let ret: TensorHandle = #tfop("ResizeNearestNeighborGrad", grads, size, T$dtype: T.tensorFlowDataType, - align_corners: alignCorners) + align_corners: alignCorners, + half_pixel_centers: halfPixelCenters) return Tensor(handle: ret) } @@ -15714,6 +16274,448 @@ public static func restrict( return Tensor(handle: ret) } +/// Retrieve ADAM embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the ADAM optimization algorithm. +/// - momenta: Parameter momenta updated by the ADAM optimization algorithm. +/// - velocities: Parameter velocities updated by the ADAM optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingADAMParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, momenta: Tensor, velocities: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingADAMParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve ADAM embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the ADAM optimization algorithm. +/// - momenta: Parameter momenta updated by the ADAM optimization algorithm. 
+/// - velocities: Parameter velocities updated by the ADAM optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingADAMParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, momenta: Tensor, velocities: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingADAMParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve Adadelta embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. +/// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. +/// - updates: Parameter updates updated by the Adadelta optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingAdadeltaParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, updates: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingAdadeltaParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve Adadelta embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Adadelta optimization algorithm. +/// - accumulators: Parameter accumulators updated by the Adadelta optimization algorithm. +/// - updates: Parameter updates updated by the Adadelta optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingAdadeltaParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, updates: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve Adagrad embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. 
For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. +/// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingAdagradParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingAdagradParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Retrieve Adagrad embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Adagrad optimization algorithm. +/// - accumulators: Parameter accumulators updated by the Adagrad optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the Adagrad optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingAdagradParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve centered RMSProp embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the centered RMSProp optimization algorithm. +/// - ms: Parameter ms updated by the centered RMSProp optimization algorithm. +/// - mom: Parameter mom updated by the centered RMSProp optimization algorithm. +/// - mg: Parameter mg updated by the centered RMSProp optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingCenteredRMSPropParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, ms: Tensor, mom: Tensor, mg: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingCenteredRMSPropParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve FTRL embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. 
For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the FTRL optimization algorithm. +/// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. +/// - linears: Parameter linears updated by the FTRL optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingFTRLParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, linears: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingFTRLParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve FTRL embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the FTRL optimization algorithm. +/// - accumulators: Parameter accumulators updated by the FTRL optimization algorithm. +/// - linears: Parameter linears updated by the FTRL optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the FTRL optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingFTRLParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, linears: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve MDL Adagrad Light embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm. +/// - accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm. +/// - weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm. +/// - benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm. 
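All of the `RetrieveTPUEmbedding*Parameters` wrappers in this hunk share the same attribute-only calling convention; a sketch using the Adagrad variant from above, with a placeholder table name and shard layout (a `ConfigureTPUEmbeddingHost` step is assumed to have run first, per the doc comments):

```swift
import TensorFlow

// Pull the Adagrad table back to host memory, e.g. before writing a checkpoint.
let (parameters, accumulators) = Raw.retrieveTPUEmbeddingAdagradParameters(
  tableName: "embedding_table_0",
  numShards: 1,
  shardId: 0)
// `parameters` and `accumulators` are host-side copies of the table's
// optimizer state.
```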
+@inlinable @inline(__always) +public static func retrieveTPUEmbeddingMDLAdagradLightParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, weights: Tensor, benefits: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingMDLAdagradLightParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve Momentum embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Momentum optimization algorithm. +/// - momenta: Parameter momenta updated by the Momentum optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingMomentumParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, momenta: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingMomentumParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Retrieve Momentum embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the Momentum optimization algorithm. +/// - momenta: Parameter momenta updated by the Momentum optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingMomentumParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, momenta: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve proximal Adagrad embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. +/// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. 
+@inlinable @inline(__always) +public static func retrieveTPUEmbeddingProximalAdagradParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingProximalAdagradParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Retrieve proximal Adagrad embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm. +/// - accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm. +/// - gradient_accumulators: Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, accumulators: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve RMSProp embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. +/// - ms: Parameter ms updated by the RMSProp optimization algorithm. +/// - mom: Parameter mom updated by the RMSProp optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingRMSPropParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, ms: Tensor, mom: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingRMSPropParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Retrieve RMSProp embedding parameters with debug support. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Outputs: +/// - parameters: Parameter parameters updated by the RMSProp optimization algorithm. +/// - ms: Parameter ms updated by the RMSProp optimization algorithm. +/// - mom: Parameter mom updated by the RMSProp optimization algorithm. 
+/// - gradient_accumulators: Parameter gradient_accumulators updated by the RMSProp optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingRMSPropParametersGradAccumDebug( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> (parameters: Tensor, ms: Tensor, mom: Tensor, gradientAccumulators: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle, TensorHandle) = #tfop("RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3)) +} + +/// Retrieve SGD embedding parameters. +/// +/// An op that retrieves optimization parameters from embedding to host +/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up +/// the correct embedding table configuration. For example, this op is +/// used to retrieve updated parameters before saving a checkpoint. +/// +/// - Output parameters: Parameter parameters updated by the stochastic gradient descent optimization algorithm. +@inlinable @inline(__always) +public static func retrieveTPUEmbeddingStochasticGradientDescentParameters( + tableId: Int64 = -1, + tableName: String, + numShards: Int64, + shardId: Int64 +) -> Tensor { + let ret: TensorHandle = #tfop("RetrieveTPUEmbeddingStochasticGradientDescentParameters", + table_id: tableId, + table_name: tableName, + num_shards: numShards, + shard_id: shardId) + return Tensor(handle: ret) +} + /// Reverses specific dimensions of a tensor. /// /// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions @@ -16443,288 +17445,72 @@ public static func saveV2( shapeAndSlices: StringTensor, tensors: [Tensor] ) { - return #tfop("SaveV2", - prefix, - tensorNames, - shapeAndSlices, - tensors) -} - -/// Outputs a `Summary` protocol buffer with scalar values. -/// -/// The input `tags` and `values` must have the same shape. The generated summary -/// has a summary value for each tag-value pair in `tags` and `values`. -/// -/// - Parameters: -/// - tags: Tags for the summary. -/// - values: Same shape as `tags. Values for the summary. -/// -/// - Output summary: Scalar. Serialized `Summary` protocol buffer. -@inlinable @inline(__always) -public static func scalarSummary( - tags: StringTensor, - _ values: Tensor -) -> StringTensor { - let ret: TensorHandle = #tfop("ScalarSummary", - tags, - values, - T$dtype: T.tensorFlowDataType) - return StringTensor(handle: ret) -} - -/// Adds sparse updates to a variable reference. -/// -/// This operation computes -/// -/// # Scalar indices -/// ref[indices, ...] += updates[...] -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] += updates[i, ...] -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their contributions add. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -///
-/// -///
-/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to add to `ref`. -/// -/// - Attr use_locking: If True, the addition will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. -@inlinable @inline(__always) -public static func scatterAdd( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterAdd", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Divides a variable reference by sparse updates. -/// -/// This operation computes -/// -/// ```python -/// # Scalar indices -/// ref[indices, ...] /= updates[...] -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] /= updates[i, ...] -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] -/// ``` -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their contributions divide. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of values that `ref` is divided by. -/// -/// - Attr use_locking: If True, the operation will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. -@inlinable @inline(__always) -public static func scatterDiv( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterDiv", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) + return #tfop("SaveV2", + prefix, + tensorNames, + shapeAndSlices, + tensors) } -/// Reduces sparse updates into a variable reference using the `max` operation. -/// -/// This operation computes -/// -/// # Scalar indices -/// ref[indices, ...] = max(ref[indices, ...], updates[...]) -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their contributions combine. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. +/// Outputs a `Summary` protocol buffer with scalar values. /// -///
-/// -///
+/// The input `tags` and `values` must have the same shape. The generated summary +/// has a summary value for each tag-value pair in `tags` and `values`. /// /// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to reduce into `ref`. -/// -/// - Attr use_locking: If True, the update will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. +/// - tags: Tags for the summary. +/// - values: Same shape as `tags. Values for the summary. /// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. +/// - Output summary: Scalar. Serialized `Summary` protocol buffer. @inlinable @inline(__always) -public static func scatterMax( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterMax", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func scalarSummary( + tags: StringTensor, + _ values: Tensor +) -> StringTensor { + let ret: TensorHandle = #tfop("ScalarSummary", + tags, + values, + T$dtype: T.tensorFlowDataType) + return StringTensor(handle: ret) } -/// Reduces sparse updates into a variable reference using the `min` operation. -/// -/// This operation computes -/// -/// # Scalar indices -/// ref[indices, ...] = min(ref[indices, ...], updates[...]) -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their contributions combine. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -///
-/// -///
-/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to reduce into `ref`. -/// -/// - Attr use_locking: If True, the update will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. @inlinable @inline(__always) -public static func scatterMin( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterMin", - ref, - indices, - updates, +public static func scaleAndTranslate( + images: Tensor, + size: Tensor, + scale: Tensor, + translation: Tensor, + kernelType: String = "b'lanczos3'", + antialias: Bool = true +) -> Tensor { + let ret: TensorHandle = #tfop("ScaleAndTranslate", + images, + size, + scale, + translation, T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) + kernel_type: kernelType, + antialias: antialias) return Tensor(handle: ret) } -/// Multiplies sparse updates into a variable reference. -/// -/// This operation computes -/// -/// ```python -/// # Scalar indices -/// ref[indices, ...] *= updates[...] -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] *= updates[i, ...] -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] -/// ``` -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their contributions multiply. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to multiply to `ref`. -/// -/// - Attr use_locking: If True, the operation will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. 
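Editorial note (illustrative only, not part of the generated diff): a hedged sketch of calling the `ScaleAndTranslate` wrapper added above. The 4-D `[batch, height, width, channels]` layout and the 2-element `size`/`scale`/`translation` arguments follow the underlying TensorFlow op; the concrete numbers are made up for the example, and the kernel type is passed explicitly rather than relying on the generated default.

```swift
// Sketch only: upscale a 64x64 RGB image to 128x128.
let image = Tensor<Float>(zeros: [1, 64, 64, 3])   // [batch, height, width, channels]
let resized = Raw.scaleAndTranslate(
    images: image,
    size: Tensor<Int32>([128, 128]),          // output height and width
    scale: Tensor<Float>([2.0, 2.0]),         // per-axis scale factors
    translation: Tensor<Float>([0.0, 0.0]),   // per-axis translation, in output pixels
    kernelType: "lanczos3",
    antialias: true)
```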
@inlinable @inline(__always) -public static func scatterMul( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false +public static func scaleAndTranslateGrad( + grads: Tensor, + originalImage: Tensor, + scale: Tensor, + translation: Tensor, + kernelType: String = "b'lanczos3'", + antialias: Bool = true ) -> Tensor { - let ret: TensorHandle = #tfop("ScatterMul", - ref, - indices, - updates, + let ret: TensorHandle = #tfop("ScaleAndTranslateGrad", + grads, + originalImage, + scale, + translation, T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) + kernel_type: kernelType, + antialias: antialias) return Tensor(handle: ret) } @@ -16835,72 +17621,6 @@ public static func scatterNd( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterNdAdd", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - /// Applies sparse addition to `input` using individual values or slices /// /// from `updates` according to indices `indices`. The updates are non-aliasing: @@ -16961,251 +17681,6 @@ public static func scatterNdNonAliasingAdd( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterNdSub", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Applies sparse `updates` to individual values or slices within a given -/// -/// variable according to `indices`. -/// -/// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. -/// -/// `indices` must be integer tensor, containing indices into `ref`. -/// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. -/// -/// The innermost dimension of `indices` (with length `K`) corresponds to -/// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th -/// dimension of `ref`. -/// -/// `updates` is `Tensor` of rank `Q-1+P-K` with shape: -/// -/// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ -/// -/// For example, say we want to update 4 scattered elements to a rank-1 tensor to -/// 8 elements. In Python, that update would look like this: -/// -/// ```python -/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) -/// indices = tf.constant([[4], [3], [1] ,[7]]) -/// updates = tf.constant([9, 10, 11, 12]) -/// update = tf.scatter_nd_update(ref, indices, updates) -/// with tf.Session() as sess: -/// print sess.run(update) -/// ``` -/// -/// The resulting update to ref would look like this: -/// -/// [1, 11, 3, 10, 9, 6, 7, 12] -/// -/// See `tf.scatter_nd` for more details about how to make updates to -/// slices. -/// -/// See also `tf.scatter_update` and `tf.batch_scatter_update`. -/// -/// - Parameters: -/// - ref: A mutable Tensor. Should be from a Variable node. -/// - indices: A Tensor. Must be one of the following types: int32, int64. -/// A tensor of indices into ref. -/// - updates: A Tensor. Must have the same type as ref. A tensor of updated -/// values to add to ref. -/// -/// - Attr use_locking: An optional bool. Defaults to True. If True, the assignment will -/// be protected by a lock; otherwise the behavior is undefined, -/// but may exhibit less contention. -/// -/// - Output output_ref: Same as ref. 
Returned as a convenience for operations that want to -/// use the updated values after the update is done. -@inlinable @inline(__always) -public static func scatterNdUpdate( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = true -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterNdUpdate", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Subtracts sparse updates to a variable reference. -/// -/// ```python -/// # Scalar indices -/// ref[indices, ...] -= updates[...] -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] -= updates[i, ...] -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] -/// ``` -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// Duplicate entries are handled correctly: if multiple `indices` reference -/// the same location, their (negated) contributions add. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -///
-/// -///
-/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to subtract from `ref`. -/// -/// - Attr use_locking: If True, the subtraction will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. -@inlinable @inline(__always) -public static func scatterSub( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterSub", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Applies sparse updates to a variable reference. -/// -/// This operation computes -/// -/// ```python -/// # Scalar indices -/// ref[indices, ...] = updates[...] -/// -/// # Vector indices (for each i) -/// ref[indices[i], ...] = updates[i, ...] -/// -/// # High rank indices (for each i, ..., j) -/// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] -/// ``` -/// -/// This operation outputs `ref` after the update is done. -/// This makes it easier to chain operations that need to use the reset value. -/// -/// If values in `ref` is to be updated more than once, because there are -/// duplicate entries in `indices`, the order at which the updates happen -/// for each value is undefined. -/// -/// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. -/// -///
-/// -///
-/// -/// See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. -/// -/// - Parameters: -/// - ref: Should be from a `Variable` node. -/// - indices: A tensor of indices into the first dimension of `ref`. -/// - updates: A tensor of updated values to store in `ref`. -/// -/// - Attr use_locking: If True, the assignment will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output output_ref: = Same as `ref`. Returned as a convenience for operations that want -/// to use the updated values after the update is done. -@inlinable @inline(__always) -public static func scatterUpdate( - ref: Tensor, - indices: Tensor, - updates: Tensor, - useLocking: Bool = true -) -> Tensor { - let ret: TensorHandle = #tfop("ScatterUpdate", - ref, - indices, - updates, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - /// Computes fingerprints of the input strings. /// /// - Parameter input: vector of strings to compute fingerprints on. @@ -17221,27 +17696,6 @@ public static func sdcaFprint( return Tensor(handle: ret) } -/// Applies L1 regularization shrink step on the parameters. -/// -/// - Parameter weights: a list of vectors where each value is the weight associated with a -/// feature group. -/// -/// - Attrs: -/// - num_features: Number of feature groups to apply shrinking step. -/// - l1: Symmetric l1 regularization strength. -/// - l2: Symmetric l2 regularization strength. Should be a positive float. -@inlinable @inline(__always) -public static func sdcaShrinkL1( - weights: [Tensor], - l1: Double, - l2: Double -) { - return #tfop("SdcaShrinkL1", - weights, - l1: l1, - l2: l2) -} - /// Computes the maximum along segments of a tensor. /// /// Read @@ -17620,6 +18074,35 @@ public static func seluGrad( return Tensor(handle: ret) } +/// Performs gradient updates of embedding tables. +/// +/// - Parameters: +/// - inputs: A TensorList of gradients with which to update embedding tables. +/// This argument has the same length and shapes as the return value of +/// RecvTPUEmbeddingActivations, but contains gradients of the model's loss +/// with respect to the embedding activations. The embedding tables are updated +/// from these gradients via the optimizer specified in the TPU embedding +/// configuration given to tpu.initialize_system. +/// - learning_rates: A TensorList of float32 scalars, one for each dynamic learning +/// rate tag: see the comments in +/// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. +/// Multiple tables can share the same dynamic learning rate tag as specified +/// in the configuration. If the learning rates for all tables are constant, +/// this list should be empty. +/// +/// - Attr config: Serialized TPUEmbeddingConfiguration proto. +@inlinable @inline(__always) +public static func sendTPUEmbeddingGradients( + inputs: [Tensor], + learningRates: [Tensor], + config: String +) { + return #tfop("SendTPUEmbeddingGradients", + inputs, + learningRates, + config: config) +} + /// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. /// /// The `SparseTensor` must have rank `R` greater than 1, and the first dimension @@ -17775,6 +18258,15 @@ public static func shardedFilespec( return StringTensor(handle: ret) } +/// Shuts down a running distributed TPU system. +/// +/// The op returns an error if no system is running. 
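Editorial note (illustrative only): a hedged sketch of the `SendTPUEmbeddingGradients` wrapper added earlier in this hunk. As the documentation notes, `learningRates` may be an empty list when every table uses a constant learning rate; the gradient tensor shape and the serialized configuration string below are placeholders, not real values.

```swift
// Sketch only: push one gradient tensor per embedding table back to the TPU.
let embeddingGradients: [Tensor<Float>] = [Tensor<Float>(zeros: [8, 16])]   // placeholder shape
let embeddingConfig = "<serialized TPUEmbeddingConfiguration proto>"         // placeholder string
Raw.sendTPUEmbeddingGradients(
    inputs: embeddingGradients,
    learningRates: [],          // empty: all tables use constant learning rates
    config: embeddingConfig)
```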
+@inlinable @inline(__always) +public static func shutdownDistributedTPU( +) { + return #tfop("ShutdownDistributedTPU") +} + /// Computes sigmoid of `x` element-wise. /// /// Specifically, `y = 1 / (1 + exp(-x))`. @@ -18392,675 +18884,102 @@ public static func spaceToBatchND( - _ input: Tensor, - blockSize: Int64, - dataFormat: DataFormat3 = .nhwc -) -> Tensor { - let ret: TensorHandle = #tfop("SpaceToDepth", - input, - T$dtype: T.tensorFlowDataType, - block_size: blockSize, - data_format: dataFormat.cName) - return Tensor(handle: ret) -} - -/// Applies a sparse gradient to a given accumulator. -/// -/// Does not add if local_step is smaller than the accumulator's -/// global_step. -/// -/// - Parameters: -/// - handle: The handle to a accumulator. -/// - local_step: The local_step value at which the sparse gradient was computed. -/// - gradient_indices: Indices of the sparse gradient to be accumulated. Must be a -/// vector. -/// - gradient_values: Values are the non-zero slices of the gradient, and must have -/// the same first dimension as indices, i.e., the nnz represented by indices and -/// values must be consistent. -/// - gradient_shape: Shape of the sparse gradient to be accumulated. -/// -/// - Attrs: -/// - dtype: The data type of accumulated gradients. Needs to correspond to the type -/// of the accumulator. -/// - has_known_shape: Boolean indicating whether gradient_shape is unknown, in which -/// case the input is ignored during validation. -@inlinable @inline(__always) -public static func sparseAccumulatorApplyGradient( - handle: StringTensor, - localStep: Tensor, - gradientIndices: Tensor, - gradientValues: Tensor, - gradientShape: Tensor, - hasKnownShape: Bool -) { - return #tfop("SparseAccumulatorApplyGradient", - handle, - localStep, - gradientIndices, - gradientValues, - gradientShape, - dtype$dtype: Dtype.tensorFlowDataType, - has_known_shape: hasKnownShape) -} - -/// Extracts the average sparse gradient in a SparseConditionalAccumulator. -/// -/// The op will blocks until sufficient (i.e., more than num_required) -/// gradients have been accumulated. If the accumulator has already -/// aggregated more than num_required gradients, it will return its -/// average of the accumulated gradients. Also automatically increments -/// the recorded global_step in the accumulator by 1, and resets the -/// aggregate to 0. -/// -/// - Parameters: -/// - handle: The handle to a SparseConditionalAccumulator. -/// - num_required: Number of gradients required before we return an aggregate. -/// -/// - Attr dtype: The data type of accumulated gradients. Needs to correspond to the type -/// of the accumulator. -/// -/// - Outputs: -/// - indices: Indices of the average of the accumulated sparse gradients. -/// - values: Values of the average of the accumulated sparse gradients. -/// - shape: Shape of the average of the accumulated sparse gradients. -@inlinable @inline(__always) -public static func sparseAccumulatorTakeGradient( - handle: StringTensor, - numRequired: Tensor -) -> (indices: Tensor, values: Tensor, shape: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("SparseAccumulatorTakeGradient", - handle, - numRequired, - dtype$dtype: Dtype.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) -} - -/// Adds two `SparseTensor` objects to produce another `SparseTensor`. -/// -/// The input `SparseTensor` objects' indices are assumed ordered in standard -/// lexicographic order. 
If this is not the case, before this step run -/// `SparseReorder` to restore index ordering. -/// -/// By default, if two values sum to zero at some index, the output `SparseTensor` -/// would still include that particular location in its index, storing a zero in the -/// corresponding value slot. To override this, callers can specify `thresh`, -/// indicating that if the sum has a magnitude strictly smaller than `thresh`, its -/// corresponding value and index would then not be included. In particular, -/// `thresh == 0` (default) means everything is kept and actual thresholding happens -/// only for a positive value. -/// -/// In the following shapes, `nnz` is the count after taking `thresh` into account. -/// -/// - Parameters: -/// - a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. -/// - a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. -/// - a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. -/// - b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. -/// - b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. -/// - b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. -/// - thresh: 0-D. The magnitude threshold that determines if an output value/index -/// pair takes space. -@inlinable @inline(__always) -public static func sparseAdd( - aIndices: Tensor, - aValues: Tensor, - aShape: Tensor, - bIndices: Tensor, - bValues: Tensor, - bShape: Tensor, - thresh: Tensor -) -> (sumIndices: Tensor, sumValues: Tensor, sumShape: Tensor) { - let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("SparseAdd", - aIndices, - aValues, - aShape, - bIndices, - bValues, - bShape, - thresh, - T$dtype: T.tensorFlowDataType, - Treal$dtype: Treal.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) -} - -/// The gradient operator for the SparseAdd op. -/// -/// The SparseAdd op calculates A + B, where A, B, and the sum are all represented -/// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. -/// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty -/// values of A and B. -/// -/// - Parameters: -/// - backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to -/// the non-empty values of the sum. -/// - a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. -/// - b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. -/// - sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size -/// `[nnz(sum), ndims]`. -/// -/// - Outputs: -/// - a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the -/// non-empty values of A. -/// - b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the -/// non-empty values of B. -@inlinable @inline(__always) -public static func sparseAddGrad( - backpropValGrad: Tensor, - aIndices: Tensor, - bIndices: Tensor, - sumIndices: Tensor -) -> (aValGrad: Tensor, bValGrad: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("SparseAddGrad", - backpropValGrad, - aIndices, - bIndices, - sumIndices, - T$dtype: T.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - -/// var: Should be from a Variable(). -/// -/// - Parameters: -/// - accum: Should be from a Variable(). -/// - accum_update: : Should be from a Variable(). -/// - lr: Learning rate. Must be a scalar. 
-/// - rho: Decay factor. Must be a scalar. -/// - epsilon: Constant factor. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// -/// - Attr use_locking: If True, updating of the var and accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyAdadelta( - var_: Tensor, - accum: Tensor, - accumUpdate: Tensor, - lr: Tensor, - rho: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyAdadelta", - var_, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, - indices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update relevant entries in '*var' and '*accum' according to the adagrad scheme. -/// -/// That is for rows we have grad for, we update var and accum as follows: -/// $$accum += grad * grad$$ -/// $$var -= lr * grad * (1 / sqrt(accum))$$ -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Learning rate. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyAdagrad( - var_: Tensor, - accum: Tensor, - lr: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false, - updateSlots: Bool = true -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyAdagrad", - var_, - accum, - lr, - grad, - indices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking, - update_slots: updateSlots) - return Tensor(handle: ret) -} - -/// Update entries in '*var' and '*accum' according to the proximal adagrad scheme. -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - gradient_accumulator: Should be from a Variable(). -/// - gradient_squared_accumulator: Should be from a Variable(). -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// - lr: Learning rate. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - global_step: Training step number. Must be a scalar. -/// -/// - Attr use_locking: If True, updating of the var and accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". 
-@inlinable @inline(__always) -public static func sparseApplyAdagradDA( - var_: Tensor, - gradientAccumulator: Tensor, - gradientSquaredAccumulator: Tensor, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - globalStep: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyAdagradDA", - var_, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - indices, - lr, - l1, - l2, - globalStep, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update '*var' according to the centered RMSProp algorithm. -/// -/// The centered RMSProp algorithm uses an estimate of the centered second moment -/// (i.e., the variance) for normalization, as opposed to regular RMSProp, which -/// uses the (uncentered) second moment. This often helps with training, but is -/// slightly more expensive in terms of computation and memory. -/// -/// Note that in dense implementation of this algorithm, mg, ms, and mom will -/// update even if the grad is zero, but in this sparse implementation, mg, ms, -/// and mom will not update in iterations during which the grad is zero. -/// -/// mean_square = decay * mean_square + (1-decay) * gradient ** 2 -/// mean_grad = decay * mean_grad + (1-decay) * gradient -/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) -/// -/// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ -/// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ -/// $$var <- var - mom$$ -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - mg: Should be from a Variable(). -/// - ms: Should be from a Variable(). -/// - mom: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - rho: Decay rate. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var, ms and mom. -/// -/// - Attr use_locking: If `True`, updating of the var, mg, ms, and mom tensors is -/// protected by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyCenteredRMSProp( - var_: Tensor, - mg: Tensor, - ms: Tensor, - mom: Tensor, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyCenteredRMSProp", - var_, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update relevant entries in '*var' according to the Ftrl-proximal scheme. -/// -/// That is for rows we have grad for, we update var, accum and linear as follows: -/// $$accum_new = accum + grad * grad$$ -/// $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}} / lr * var$$ -/// $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$ -/// $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| > l1\ else\ 0.0$$ -/// $$accum = accum_{new}$$ -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - linear: Should be from a Variable(). -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. 
-/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - lr_power: Scaling factor. Must be a scalar. -/// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyFtrl( - var_: Tensor, - accum: Tensor, - linear: Tensor, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - lrPower: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyFtrl", - var_, - accum, - linear, - grad, - indices, - lr, - l1, - l2, - lrPower, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update relevant entries in '*var' according to the Ftrl-proximal scheme. -/// -/// That is for rows we have grad for, we update var, accum and linear as follows: -/// grad_with_shrinkage = grad + 2 * l2_shrinkage * var -/// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage -/// linear += grad_with_shrinkage + -/// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var -/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 -/// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 -/// accum = accum_new -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - linear: Should be from a Variable(). -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// - lr: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 shrinkage regulariation. Must be a scalar. -/// - lr_power: Scaling factor. Must be a scalar. -/// -/// - Attr use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyFtrlV2( - var_: Tensor, - accum: Tensor, - linear: Tensor, - grad: Tensor, - indices: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - l2Shrinkage: Tensor, - lrPower: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyFtrlV2", - var_, - accum, - linear, - grad, - indices, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) -} - -/// Update relevant entries in '*var' and '*accum' according to the momentum scheme. -/// -/// Set use_nesterov = True if you want to use Nesterov momentum. -/// -/// That is for rows we have grad for, we update var and accum as follows: -/// -/// $$accum = accum * momentum + grad$$ -/// $$var -= lr * accum$$ -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Learning rate. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// - momentum: Momentum. Must be a scalar. -/// -/// - Attrs: -/// - use_locking: If `True`, updating of the var and accum tensors will be protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. 
-/// - use_nesterov: If `True`, the tensor passed to compute grad will be -/// var - lr * momentum * accum, so in the end, the var you get is actually -/// var - lr * momentum * accum. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyMomentum( - var_: Tensor, - accum: Tensor, - lr: Tensor, - grad: Tensor, - indices: Tensor, - momentum: Tensor, - useLocking: Bool = false, - useNesterov: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyMomentum", - var_, - accum, - lr, - grad, - indices, - momentum, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking, - use_nesterov: useNesterov) - return Tensor(handle: ret) -} - -/// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. -/// -/// That is for rows we have grad for, we update var and accum as follows: -/// $$accum += grad * grad$$ -/// $$prox_v = var$$ -/// $$prox_v -= lr * grad * (1 / sqrt(accum))$$ -/// $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$ -/// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - accum: Should be from a Variable(). -/// - lr: Learning rate. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. -/// -/// - Attr use_locking: If True, updating of the var and accum tensors will be protected by -/// a lock; otherwise the behavior is undefined, but may exhibit less contention. -/// -/// - Output out: Same as "var". -@inlinable @inline(__always) -public static func sparseApplyProximalAdagrad( - var_: Tensor, - accum: Tensor, - lr: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyProximalAdagrad", - var_, - accum, - lr, - l1, - l2, - grad, - indices, +@inlinable @inline(__always) +public static func spaceToDepth( + _ input: Tensor, + blockSize: Int64, + dataFormat: DataFormat4 = .b'nhwc' +) -> Tensor { + let ret: TensorHandle = #tfop("SpaceToDepth", + input, T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) + block_size: blockSize, + data_format: dataFormat.cName) return Tensor(handle: ret) } -/// Sparse update '*var' as FOBOS algorithm with fixed learning rate. +/// Adds two `SparseTensor` objects to produce another `SparseTensor`. /// -/// That is for rows we have grad for, we update var as follows: -/// $$prox_v = var - alpha * grad$$ -/// $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$ +/// The input `SparseTensor` objects' indices are assumed ordered in standard +/// lexicographic order. If this is not the case, before this step run +/// `SparseReorder` to restore index ordering. /// -/// - Parameters: -/// - var: Should be from a Variable(). -/// - alpha: Scaling factor. Must be a scalar. -/// - l1: L1 regularization. Must be a scalar. -/// - l2: L2 regularization. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var and accum. +/// By default, if two values sum to zero at some index, the output `SparseTensor` +/// would still include that particular location in its index, storing a zero in the +/// corresponding value slot. 
To override this, callers can specify `thresh`, +/// indicating that if the sum has a magnitude strictly smaller than `thresh`, its +/// corresponding value and index would then not be included. In particular, +/// `thresh == 0` (default) means everything is kept and actual thresholding happens +/// only for a positive value. /// -/// - Attr use_locking: If True, the subtraction will be protected by a lock; -/// otherwise the behavior is undefined, but may exhibit less contention. +/// In the following shapes, `nnz` is the count after taking `thresh` into account. /// -/// - Output out: Same as "var". +/// - Parameters: +/// - a_indices: 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix. +/// - a_values: 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. +/// - a_shape: 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. +/// - b_indices: 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix. +/// - b_values: 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. +/// - b_shape: 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. +/// - thresh: 0-D. The magnitude threshold that determines if an output value/index +/// pair takes space. @inlinable @inline(__always) -public static func sparseApplyProximalGradientDescent( - var_: Tensor, - alpha: Tensor, - l1: Tensor, - l2: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyProximalGradientDescent", - var_, - alpha, - l1, - l2, - grad, - indices, +public static func sparseAdd( + aIndices: Tensor, + aValues: Tensor, + aShape: Tensor, + bIndices: Tensor, + bValues: Tensor, + bShape: Tensor, + thresh: Tensor +) -> (sumIndices: Tensor, sumValues: Tensor, sumShape: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("SparseAdd", + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape, + thresh, T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) + Treal$dtype: Treal.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) } -/// Update '*var' according to the RMSProp algorithm. -/// -/// Note that in dense implementation of this algorithm, ms and mom will -/// update even if the grad is zero, but in this sparse implementation, ms -/// and mom will not update in iterations during which the grad is zero. -/// -/// mean_square = decay * mean_square + (1-decay) * gradient ** 2 -/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon) +/// The gradient operator for the SparseAdd op. /// -/// $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ -/// $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ -/// $$var <- var - mom$$ +/// The SparseAdd op calculates A + B, where A, B, and the sum are all represented +/// as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. +/// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty +/// values of A and B. /// /// - Parameters: -/// - var: Should be from a Variable(). -/// - ms: Should be from a Variable(). -/// - mom: Should be from a Variable(). -/// - lr: Scaling factor. Must be a scalar. -/// - rho: Decay rate. Must be a scalar. -/// - epsilon: Ridge term. Must be a scalar. -/// - grad: The gradient. -/// - indices: A vector of indices into the first dimension of var, ms and mom. 
-/// -/// - Attr use_locking: If `True`, updating of the var, ms, and mom tensors is protected -/// by a lock; otherwise the behavior is undefined, but may exhibit less -/// contention. +/// - backprop_val_grad: 1-D with shape `[nnz(sum)]`. The gradient with respect to +/// the non-empty values of the sum. +/// - a_indices: 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. +/// - b_indices: 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. +/// - sum_indices: 2-D. The `indices` of the sum `SparseTensor`, size +/// `[nnz(sum), ndims]`. /// -/// - Output out: Same as "var". +/// - Outputs: +/// - a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the +/// non-empty values of A. +/// - b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the +/// non-empty values of B. @inlinable @inline(__always) -public static func sparseApplyRMSProp( - var_: Tensor, - ms: Tensor, - mom: Tensor, - lr: Tensor, - rho: Tensor, - momentum: Tensor, - epsilon: Tensor, - grad: Tensor, - indices: Tensor, - useLocking: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("SparseApplyRMSProp", - var_, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - use_locking: useLocking) - return Tensor(handle: ret) +public static func sparseAddGrad( + backpropValGrad: Tensor, + aIndices: Tensor, + bIndices: Tensor, + sumIndices: Tensor +) -> (aValGrad: Tensor, bValGrad: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("SparseAddGrad", + backpropValGrad, + aIndices, + bIndices, + sumIndices, + T$dtype: T.tensorFlowDataType) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) } /// Concatenates a list of `SparseTensor` along the specified dimension. @@ -20494,53 +20413,6 @@ public static func squeeze( return Tensor(handle: ret) } -/// Deprecated, use StackV2. -@inlinable @inline(__always) -public static func stack( - stackName: String, - typeElemType: ElemType.Type -) -> StringTensor { - let ret: TensorHandle = #tfop("Stack", - elem_type$dtype: ElemType.tensorFlowDataType, - stack_name: stackName) - return StringTensor(handle: ret) -} - -/// Deprecated, use StackCloseV2. -@inlinable @inline(__always) -public static func stackClose( - handle: StringTensor -) { - return #tfop("StackClose", - handle) -} - -/// Deprecated, use StackPopV2. -@inlinable @inline(__always) -public static func stackPop( - handle: StringTensor -) -> Tensor { - let ret: TensorHandle = #tfop("StackPop", - handle, - elem_type$dtype: ElemType.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Deprecated, use StackPushV2. -@inlinable @inline(__always) -public static func stackPush( - handle: StringTensor, - elem: Tensor, - swapMemory: Bool = false -) -> Tensor { - let ret: TensorHandle = #tfop("StackPush", - handle, - elem, - T$dtype: T.tensorFlowDataType, - swap_memory: swapMemory) - return Tensor(handle: ret) -} - /// Stage values similar to a lightweight Enqueue. 
/// /// The basic functionality of this Op is similar to a queue with many @@ -20687,6 +20559,39 @@ public static func statelessRandomUniform( + shape: Tensor, + seed: Tensor, + minval: Tensor, + maxval: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("StatelessRandomUniformInt", + shape, + seed, + minval, + maxval, + dtype$dtype: Dtype.tensorFlowDataType, + T$dtype: T.tensorFlowDataType, + Tseed$dtype: Tseed.tensorFlowDataType) + return Tensor(handle: ret) +} + /// Outputs deterministic pseudorandom values from a truncated normal distribution. /// /// The generated values follow a normal distribution with mean 0 and standard @@ -20954,43 +20859,6 @@ public static func stridedSlice( - ref: Tensor, - begin: Tensor, - end: Tensor, - strides: Tensor, - value: Tensor, - beginMask: Int64 = 0, - endMask: Int64 = 0, - ellipsisMask: Int64 = 0, - newAxisMask: Int64 = 0, - shrinkAxisMask: Int64 = 0 -) -> Tensor { - let ret: TensorHandle = #tfop("StridedSliceAssign", - ref, - begin, - end, - strides, - value, - T$dtype: T.tensorFlowDataType, - Index$dtype: Index.tensorFlowDataType, - begin_mask: beginMask, - end_mask: endMask, - ellipsis_mask: ellipsisMask, - new_axis_mask: newAxisMask, - shrink_axis_mask: shrinkAxisMask) - return Tensor(handle: ret) -} - /// Returns the gradient of `StridedSlice`. /// /// Since `StridedSlice` cuts out pieces of its `input` which is size @@ -21045,8 +20913,8 @@ public static func stridedSliceGrad( inputs: [Tensor], - template: String = "%s", - placeholder: String = "%s", + template: String = "b'%s'", + placeholder: String = "b'%s'", summarize: Int64 = 3 ) -> StringTensor { let ret: TensorHandle = #tfop("StringFormat", @@ -21094,7 +20962,7 @@ public static func stringJoin( @inlinable @inline(__always) public static func stringLength( _ input: StringTensor, - unit: Unit = .byte + unit: Unit = .b'byte' ) -> Tensor { let ret: TensorHandle = #tfop("StringLength", input, @@ -21335,24 +21203,109 @@ public static func sub( return Tensor(handle: ret) } +/// Return substrings from `Tensor` of strings. +/// +/// For each string in the input `Tensor`, creates a substring starting at index +/// `pos` with a total length of `len`. +/// +/// If `len` defines a substring that would extend beyond the length of the input +/// string, then as many characters as possible are used. +/// +/// A negative `pos` indicates distance within the string backwards from the end. +/// +/// If `pos` specifies an index which is out of range for any of the input strings, +/// then an `InvalidArgumentError` is thrown. +/// +/// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on +/// Op creation. +/// +/// *NOTE*: `Substr` supports broadcasting up to two dimensions. 
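Editorial note on the `StatelessRandomUniformInt` wrapper added earlier in this hunk (the surrounding `Substr` documentation continues below): a minimal sketch of a deterministic draw. The half-open `[minval, maxval)` range follows the underlying op's convention, and the `Int32`/`Int64` choices here are assumptions made only for the example.

```swift
// Sketch only: four deterministic draws from [0, 100). The same shape and seed
// always reproduce the same values.
let draws = Raw.statelessRandomUniformInt(
    shape: Tensor<Int32>([4]),        // number of values to generate
    seed: Tensor<Int64>([42, 7]),     // 2-element seed tensor
    minval: Tensor<Int64>(0),         // inclusive lower bound
    maxval: Tensor<Int64>(100))       // exclusive upper bound
```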
More about +/// broadcasting +/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +/// +/// --- +/// +/// Examples +/// +/// Using scalar `pos` and `len`: +/// +/// ```python +/// input = [b'Hello', b'World'] +/// position = 1 +/// length = 3 +/// +/// output = [b'ell', b'orl'] +/// ``` +/// +/// Using `pos` and `len` with same shape as `input`: +/// +/// ```python +/// input = [[b'ten', b'eleven', b'twelve'], +/// [b'thirteen', b'fourteen', b'fifteen'], +/// [b'sixteen', b'seventeen', b'eighteen']] +/// position = [[1, 2, 3], +/// [1, 2, 3], +/// [1, 2, 3]] +/// length = [[2, 3, 4], +/// [4, 3, 2], +/// [5, 5, 5]] +/// +/// output = [[b'en', b'eve', b'lve'], +/// [b'hirt', b'urt', b'te'], +/// [b'ixtee', b'vente', b'hteen']] +/// ``` +/// +/// Broadcasting `pos` and `len` onto `input`: +/// +/// ``` +/// input = [[b'ten', b'eleven', b'twelve'], +/// [b'thirteen', b'fourteen', b'fifteen'], +/// [b'sixteen', b'seventeen', b'eighteen'], +/// [b'nineteen', b'twenty', b'twentyone']] +/// position = [1, 2, 3] +/// length = [1, 2, 3] +/// +/// output = [[b'e', b'ev', b'lve'], +/// [b'h', b'ur', b'tee'], +/// [b'i', b've', b'hte'], +/// [b'i', b'en', b'nty']] +/// ``` +/// +/// Broadcasting `input` onto `pos` and `len`: +/// +/// ``` +/// input = b'thirteen' +/// position = [1, 5, 7] +/// length = [3, 2, 1] +/// +/// output = [b'hir', b'ee', b'n'] +/// ``` /// /// - Parameters: /// - input: Tensor of strings /// - pos: Scalar defining the position of first character in each substring /// - len: Scalar defining the number of characters to include in each substring /// +/// - Attr unit: The unit that is used to create the substring. One of: `"BYTE"` (for +/// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 +/// encoded Unicode code points). The default is `"BYTE"`. Results are undefined if +/// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid +/// UTF-8. +/// /// - Output output: Tensor of substrings @inlinable @inline(__always) public static func substr( _ input: StringTensor, pos: Tensor, - len: Tensor + len: Tensor, + unit: Unit = .b'byte' ) -> StringTensor { let ret: TensorHandle = #tfop("Substr", input, pos, len, - T$dtype: T.tensorFlowDataType) + T$dtype: T.tensorFlowDataType, + unit: unit.cName) return StringTensor(handle: ret) } @@ -21459,26 +21412,102 @@ public static func switch_( return (Tensor(handle: ret.0), Tensor(handle: ret.1)) } -/// A Reader that outputs the records from a TensorFlow Records file. +/// CompilationResultProto indicating the status of the TPU compilation. +@inlinable @inline(__always) +public static func tPUCompilationResult( +) -> StringTensor { + let ret: TensorHandle = #tfop("TPUCompilationResult") + return StringTensor(handle: ret) +} + +/// An op enabling differentiation of TPU Embeddings. +/// +/// This op simply returns its first input, which is assumed to have been sliced +/// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of +/// this op, and its first argument being a trainable Variable, enables automatic +/// differentiation of graphs containing embeddings via the TPU Embedding Python +/// libraries. +/// +/// - Parameters: +/// - embedding_variable: A trainable variable, enabling optimizers to find this op. +/// - sliced_activations: The embedding activations Tensor to return. /// /// - Attrs: -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. 
-/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. +/// - table_id: The id of the table in the embedding layer configuration from which +/// these activations were computed. +/// - lookup_id: Identifier of the set of embedding indices which produced these +/// activations. +@inlinable @inline(__always) +public static func tPUEmbeddingActivations( + embeddingVariable: Tensor, + slicedActivations: Tensor, + tableId: Int64, + lookupId: Int64 +) -> Tensor { + let ret: TensorHandle = #tfop("TPUEmbeddingActivations", + embeddingVariable, + slicedActivations, + table_id: tableId, + lookup_id: lookupId) + return Tensor(handle: ret) +} + +/// A TPU core selector Op. /// -/// - Output reader_handle: The handle to reference the Reader. +/// This Op produces a set of TPU cores (for warm-up) or a single TPU core +/// (for regular inference) to execute the TPU program on. The output is +/// consumed by TPUPartitionedCall. +/// +/// - Output device_ordinals: A vector 1 or more TPU cores. @inlinable @inline(__always) -public static func tFRecordReader( - container: String, - sharedName: String, - compressionType: String -) -> StringTensor { - let ret: TensorHandle = #tfop("TFRecordReader", - container: container, - shared_name: sharedName, - compression_type: compressionType) - return StringTensor(handle: ret) +public static func tPUOrdinalSelector( +) -> Tensor { + let ret: TensorHandle = #tfop("TPUOrdinalSelector") + return Tensor(handle: ret) +} + +/// Metadata indicaitng how the TPU computation should be replicated. +/// +/// - Attrs: +/// - num_replicas: Number of replicas of the computation +/// - num_cores_per_replica: Number of cores per replica. Used for model parallelism. +/// - topology: TopologyProto indicating the topology of the TPU pod slice. +/// - use_tpu: Whether to place the computation on the TPU. +/// - device_assignment: The assignment of devices for the computation. +/// - computation_shape: DEPRECATED. Use num_cores_per_replica instead. +@inlinable @inline(__always) +public static func tPUReplicateMetadata( + numReplicas: Int64, + numCoresPerReplica: Int64 = 1, + topology: String, + useTpu: Bool = true, + deviceAssignment: [Int32], + computationShape: [Int32], + hostComputeCore: [String], + paddingMap: [String], + stepMarkerLocation: String = "b'STEP_MARK_AT_ENTRY'" +) { + return #tfop("TPUReplicateMetadata", + num_replicas: numReplicas, + num_cores_per_replica: numCoresPerReplica, + topology: topology, + use_tpu: useTpu, + device_assignment: deviceAssignment, + computation_shape: computationShape, + host_compute_core: hostComputeCore, + padding_map: paddingMap, + step_marker_location: stepMarkerLocation) +} + +/// Connects N inputs to an N-way replicated TPU computation. +@inlinable @inline(__always) +public static func tPUReplicatedInput( + inputs: [Tensor] +) -> Tensor { + let ret: TensorHandle = #tfop("TPUReplicatedInput", + inputs, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) } /// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. @@ -21594,17 +21623,9 @@ public static func tanhGrad( ) -> Tensor { let ret: TensorHandle = #tfop("TanhGrad", y, - dy, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -@inlinable @inline(__always) -public static func tensorArrayClose( - handle: StringTensor -) { - return #tfop("TensorArrayClose", - handle) + dy, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) } /// Deprecated. 
Use TensorArrayCloseV3 @@ -21616,19 +21637,6 @@ public static func tensorArrayCloseV2( handle) } -@inlinable @inline(__always) -public static func tensorArrayGrad( - handle: StringTensor, - flowIn: Tensor, - source: String -) -> StringTensor { - let ret: TensorHandle = #tfop("TensorArrayGrad", - handle, - flowIn, - source: source) - return StringTensor(handle: ret) -} - /// Deprecated. Use TensorArrayGradV3 @inlinable @inline(__always) public static func tensorArrayGradV2( @@ -21643,20 +21651,6 @@ public static func tensorArrayGradV2( return StringTensor(handle: ret) } -@inlinable @inline(__always) -public static func tensorArrayRead( - handle: StringTensor, - index: Tensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArrayRead", - handle, - index, - flowIn, - dtype$dtype: Dtype.tensorFlowDataType) - return Tensor(handle: ret) -} - /// Deprecated. Use TensorArrayReadV3 @inlinable @inline(__always) public static func tensorArrayReadV2( @@ -21672,22 +21666,6 @@ public static func tensorArrayReadV2( return Tensor(handle: ret) } -@inlinable @inline(__always) -public static func tensorArrayScatter( - handle: StringTensor, - indices: Tensor, - value: Tensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArrayScatter", - handle, - indices, - value, - flowIn, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - /// Deprecated. Use TensorArrayScatterV3 @inlinable @inline(__always) public static func tensorArrayScatterV2( @@ -21705,17 +21683,6 @@ public static func tensorArrayScatterV2( return Tensor(handle: ret) } -@inlinable @inline(__always) -public static func tensorArraySize( - handle: StringTensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArraySize", - handle, - flowIn) - return Tensor(handle: ret) -} - /// Deprecated. Use TensorArraySizeV3 @inlinable @inline(__always) public static func tensorArraySizeV2( @@ -21728,14 +21695,15 @@ public static func tensorArraySizeV2( return Tensor(handle: ret) } +/// Deprecated. Use TensorArraySplitV3 @inlinable @inline(__always) -public static func tensorArraySplit( +public static func tensorArraySplitV2( handle: StringTensor, value: Tensor, lengths: Tensor, flowIn: Tensor ) -> Tensor { - let ret: TensorHandle = #tfop("TensorArraySplit", + let ret: TensorHandle = #tfop("TensorArraySplitV2", handle, value, lengths, @@ -21744,67 +21712,296 @@ public static func tensorArraySplit( return Tensor(handle: ret) } -/// Deprecated. Use TensorArraySplitV3 +/// Deprecated. Use TensorArrayGradV3 @inlinable @inline(__always) -public static func tensorArraySplitV2( +public static func tensorArrayWriteV2( handle: StringTensor, + index: Tensor, value: Tensor, - lengths: Tensor, flowIn: Tensor ) -> Tensor { - let ret: TensorHandle = #tfop("TensorArraySplitV2", + let ret: TensorHandle = #tfop("TensorArrayWriteV2", handle, + index, value, - lengths, flowIn, T$dtype: T.tensorFlowDataType) return Tensor(handle: ret) } +/// Adds sparse `updates` to an existing tensor according to `indices`. +/// +/// This operation creates a new tensor by adding sparse `updates` to the passed +/// in `tensor`. +/// This operation is very similar to `tf.scatter_nd_add`, except that the updates +/// are added onto an existing tensor (as opposed to a variable). If the memory +/// for the existing tensor cannot be re-used, a copy is made and updated. +/// +/// `indices` is an integer tensor containing indices into a new tensor of shape +/// `shape`. 
The last dimension of `indices` can be at most the rank of `shape`: +/// +/// indices.shape[-1] <= shape.rank +/// +/// The last dimension of `indices` corresponds to indices into elements +/// (if `indices.shape[-1] = shape.rank`) or slices +/// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of +/// `shape`. `updates` is a tensor with shape +/// +/// indices.shape[:-1] + shape[indices.shape[-1]:] +/// +/// The simplest form of tensor_scatter_add is to add individual elements to a +/// tensor by index. For example, say we want to add 4 elements in a rank-1 +/// tensor with 8 elements. +/// +/// In Python, this scatter add operation would look like this: +/// +/// ```python +/// indices = tf.constant([[4], [3], [1], [7]]) +/// updates = tf.constant([9, 10, 11, 12]) +/// tensor = tf.ones([8], dtype=tf.int32) +/// updated = tf.tensor_scatter_add(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [1, 12, 1, 11, 10, 1, 1, 13] +/// +/// We can also, insert entire slices of a higher rank tensor all at once. For +/// example, if we wanted to insert two slices in the first dimension of a +/// rank-3 tensor with two matrices of new values. +/// +/// In Python, this scatter add operation would look like this: +/// +/// ```python +/// indices = tf.constant([[0], [2]]) +/// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]], +/// [[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]]]) +/// tensor = tf.ones([4, 4, 4]) +/// updated = tf.tensor_scatter_add(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], +/// [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] +/// +/// Note that on CPU, if an out of bound index is found, an error is returned. +/// On GPU, if an out of bound index is found, the index is ignored. +/// +/// - Parameters: +/// - tensor: Tensor to copy/update. +/// - indices: Index tensor. +/// - updates: Updates to scatter into output. +/// +/// - Output output: A new tensor copied from tensor and updates added according to the indices. @inlinable @inline(__always) -public static func tensorArrayUnpack( - handle: StringTensor, - value: Tensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArrayUnpack", - handle, - value, - flowIn, - T$dtype: T.tensorFlowDataType) +public static func tensorScatterAdd( + _ tensor: Tensor, + indices: Tensor, + updates: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("TensorScatterAdd", + tensor, + indices, + updates, + T$dtype: T.tensorFlowDataType, + Tindices$dtype: Tindices.tensorFlowDataType) return Tensor(handle: ret) } +/// Subtracts sparse `updates` from an existing tensor according to `indices`. +/// +/// This operation creates a new tensor by subtracting sparse `updates` from the +/// passed in `tensor`. +/// This operation is very similar to `tf.scatter_nd_sub`, except that the updates +/// are subtracted from an existing tensor (as opposed to a variable). If the memory +/// for the existing tensor cannot be re-used, a copy is made and updated. +/// +/// `indices` is an integer tensor containing indices into a new tensor of shape +/// `shape`. 
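As a quick Swift-side counterpart to the Python snippets in these scatter docstrings, here is a hedged sketch of the `tensorScatterAdd` wrapper defined above; the `tensorScatterSub` and `tensorScatterUpdate` wrappers below take the same arguments. The tensor initializers are standard Swift for TensorFlow API and are assumptions of this sketch, not part of the generated file.

```swift
import TensorFlow

// Mirrors the rank-1 example in the docstring above: add four scattered
// elements into a tensor of eight ones.
let base = Tensor<Int32>(ones: [8])                               // [1, 1, 1, 1, 1, 1, 1, 1]
let indices = Tensor<Int32>(shape: [4, 1], scalars: [4, 3, 1, 7]) // one index per update
let updates = Tensor<Int32>(shape: [4], scalars: [9, 10, 11, 12])
let result = Raw.tensorScatterAdd(base, indices: indices, updates: updates)
// Expected, per the documentation: [1, 12, 1, 11, 10, 1, 1, 13]
```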
The last dimension of `indices` can be at most the rank of `shape`: +/// +/// indices.shape[-1] <= shape.rank +/// +/// The last dimension of `indices` corresponds to indices into elements +/// (if `indices.shape[-1] = shape.rank`) or slices +/// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of +/// `shape`. `updates` is a tensor with shape +/// +/// indices.shape[:-1] + shape[indices.shape[-1]:] +/// +/// The simplest form of tensor_scatter_sub is to subtract individual elements +/// from a tensor by index. For example, say we want to insert 4 scattered elements +/// in a rank-1 tensor with 8 elements. +/// +/// In Python, this scatter subtract operation would look like this: +/// +/// ```python +/// indices = tf.constant([[4], [3], [1], [7]]) +/// updates = tf.constant([9, 10, 11, 12]) +/// tensor = tf.ones([8], dtype=tf.int32) +/// updated = tf.tensor_scatter_sub(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [1, -10, 1, -9, -8, 1, 1, -11] +/// +/// We can also, insert entire slices of a higher rank tensor all at once. For +/// example, if we wanted to insert two slices in the first dimension of a +/// rank-3 tensor with two matrices of new values. +/// +/// In Python, this scatter add operation would look like this: +/// +/// ```python +/// indices = tf.constant([[0], [2]]) +/// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]], +/// [[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]]]) +/// tensor = tf.ones([4, 4, 4]) +/// updated = tf.tensor_scatter_sub(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], +/// [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] +/// +/// Note that on CPU, if an out of bound index is found, an error is returned. +/// On GPU, if an out of bound index is found, the index is ignored. +/// +/// - Parameters: +/// - tensor: Tensor to copy/update. +/// - indices: Index tensor. +/// - updates: Updates to scatter into output. +/// +/// - Output output: A new tensor copied from tensor and updates subtracted according to the indices. @inlinable @inline(__always) -public static func tensorArrayWrite( - handle: StringTensor, - index: Tensor, - value: Tensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArrayWrite", - handle, - index, - value, - flowIn, - T$dtype: T.tensorFlowDataType) +public static func tensorScatterSub( + _ tensor: Tensor, + indices: Tensor, + updates: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("TensorScatterSub", + tensor, + indices, + updates, + T$dtype: T.tensorFlowDataType, + Tindices$dtype: Tindices.tensorFlowDataType) return Tensor(handle: ret) } -/// Deprecated. Use TensorArrayGradV3 +/// Scatter `updates` into an existing tensor according to `indices`. +/// +/// This operation creates a new tensor by applying sparse `updates` to the passed +/// in `tensor`. +/// This operation is very similar to `tf.scatter_nd`, except that the updates are +/// scattered onto an existing tensor (as opposed to a zero-tensor). 
If the memory +/// for the existing tensor cannot be re-used, a copy is made and updated. +/// +/// If `indices` contains duplicates, then their updates are accumulated (summed). +/// +/// **WARNING**: The order in which updates are applied is nondeterministic, so the +/// output will be nondeterministic if `indices` contains duplicates -- because +/// of some numerical approximation issues, numbers summed in different order +/// may yield different results. +/// +/// `indices` is an integer tensor containing indices into a new tensor of shape +/// `shape`. The last dimension of `indices` can be at most the rank of `shape`: +/// +/// indices.shape[-1] <= shape.rank +/// +/// The last dimension of `indices` corresponds to indices into elements +/// (if `indices.shape[-1] = shape.rank`) or slices +/// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of +/// `shape`. `updates` is a tensor with shape +/// +/// indices.shape[:-1] + shape[indices.shape[-1]:] +/// +/// The simplest form of scatter is to insert individual elements in a tensor by +/// index. For example, say we want to insert 4 scattered elements in a rank-1 +/// tensor with 8 elements. +/// +///
+/// +/// In Python, this scatter operation would look like this: +/// +/// ```python +/// indices = tf.constant([[4], [3], [1], [7]]) +/// updates = tf.constant([9, 10, 11, 12]) +/// tensor = tf.ones([8], dtype=tf.int32) +/// updated = tf.tensor_scatter_update(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [1, 11, 1, 10, 9, 1, 1, 12] +/// +/// We can also, insert entire slices of a higher rank tensor all at once. For +/// example, if we wanted to insert two slices in the first dimension of a +/// rank-3 tensor with two matrices of new values. +/// +/// In Python, this scatter operation would look like this: +/// +/// ```python +/// indices = tf.constant([[0], [2]]) +/// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]], +/// [[5, 5, 5, 5], [6, 6, 6, 6], +/// [7, 7, 7, 7], [8, 8, 8, 8]]]) +/// tensor = tf.ones([4, 4, 4]) +/// updated = tf.tensor_scatter_update(tensor, indices, updates) +/// with tf.Session() as sess: +/// print(sess.run(scatter)) +/// ``` +/// +/// The resulting tensor would look like this: +/// +/// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], +/// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], +/// [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] +/// +/// Note that on CPU, if an out of bound index is found, an error is returned. +/// On GPU, if an out of bound index is found, the index is ignored. +/// +/// - Parameters: +/// - tensor: Tensor to copy/update. +/// - indices: Index tensor. +/// - updates: Updates to scatter into output. +/// +/// - Output output: A new tensor with the given shape and updates applied according +/// to the indices. @inlinable @inline(__always) -public static func tensorArrayWriteV2( - handle: StringTensor, - index: Tensor, - value: Tensor, - flowIn: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("TensorArrayWriteV2", - handle, - index, - value, - flowIn, - T$dtype: T.tensorFlowDataType) +public static func tensorScatterUpdate( + _ tensor: Tensor, + indices: Tensor, + updates: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("TensorScatterUpdate", + tensor, + indices, + updates, + T$dtype: T.tensorFlowDataType, + Tindices$dtype: Tindices.tensorFlowDataType) return Tensor(handle: ret) } @@ -21874,29 +22071,6 @@ public static func testStringOutput( return (Tensor(handle: ret.0), StringTensor(handle: ret.1)) } -/// A Reader that outputs the lines of a file delimited by '\n'. -/// -/// - Attrs: -/// - skip_header_lines: Number of lines to skip from the beginning of every file. -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. -/// -/// - Output reader_handle: The handle to reference the Reader. -@inlinable @inline(__always) -public static func textLineReader( - skipHeaderLines: Int64 = 0, - container: String, - sharedName: String -) -> StringTensor { - let ret: TensorHandle = #tfop("TextLineReader", - skip_header_lines: skipHeaderLines, - container: container, - shared_name: sharedName) - return StringTensor(handle: ret) -} - /// Generates labels for candidate sampling with a learned unigram distribution. 
/// /// See explanations of candidate sampling and the data formats at @@ -22104,6 +22278,33 @@ public static func transpose( + diagonals: Tensor, + rhs: Tensor +) -> Tensor { + let ret: TensorHandle = #tfop("TridiagonalSolve", + diagonals, + rhs, + T$dtype: T.tensorFlowDataType) + return Tensor(handle: ret) +} + /// Returns x / y element-wise for integer types. /// /// Truncation designates that negative numbers will round fractional quantities @@ -22329,17 +22530,6 @@ public static func twoIntOutputs( return (Tensor(handle: ret.0), Tensor(handle: ret.1)) } -@inlinable @inline(__always) -public static func twoRefsIn( - _ a: Tensor, - _ b: Tensor -) { - return #tfop("TwoRefsIn", - a, - b, - T$dtype: T.tensorFlowDataType) -} - @inlinable @inline(__always) public static func typeList( _ a: [Tensor] @@ -22452,6 +22642,181 @@ public static func unbatchGrad( return Tensor(handle: ret) } +/// Decodes each string in `input` into a sequence of Unicode code points. +/// +/// The character codepoints for all strings are returned using a single vector +/// `char_values`, with strings expanded to characters in row-major order. +/// +/// The `row_splits` tensor indicates where the codepoints for +/// each input string begin and end within the `char_values` tensor. +/// In particular, the values for the `i`th +/// string (in row-major order) are stored in the slice +/// `[row_splits[i]:row_splits[i+1]]`. Thus: +/// +/// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th +/// character in the `i`th string (in row-major order). +/// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th +/// string (in row-major order). +/// +/// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened +/// to a vector of char values. +/// +/// - Attrs: +/// - input_encoding: Text encoding of the input strings. This is any of the encodings supported +/// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +/// - errors: Error handling policy when there is invalid formatting found in the input. +/// The value of 'strict' will cause the operation to produce a InvalidArgument +/// error on any invalid input formatting. A value of 'replace' (the default) will +/// cause the operation to replace any invalid formatting in the input with the +/// `replacement_char` codepoint. A value of 'ignore' will cause the operation to +/// skip any invalid formatting in the input and produce no corresponding output +/// character. +/// - replacement_char: The replacement character codepoint to be used in place of any invalid +/// formatting in the input when `errors='replace'`. Any valid unicode codepoint may +/// be used. The default value is the default unicode replacement character is +/// 0xFFFD or U+65533.) +/// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the +/// `replacement_char`. Default is false. +/// +/// - Outputs: +/// - row_splits: A 1D int32 tensor containing the row splits. +/// - char_values: A 1D int32 Tensor containing the decoded codepoints. 
+@inlinable @inline(__always) +public static func unicodeDecode( + _ input: StringTensor, + inputEncoding: String, + errors: Errors = .b'replace', + replacementChar: Int64 = 65533, + replaceControlCharacters: Bool = false +) -> (rowSplits: Tensor, charValues: Tensor) { + let ret: (TensorHandle, TensorHandle) = #tfop("UnicodeDecode", + input, + input_encoding: inputEncoding, + errors: errors.cName, + replacement_char: replacementChar, + replace_control_characters: replaceControlCharacters) + return (Tensor(handle: ret.0), Tensor(handle: ret.1)) +} + +/// Decodes each string in `input` into a sequence of Unicode code points. +/// +/// The character codepoints for all strings are returned using a single vector +/// `char_values`, with strings expanded to characters in row-major order. +/// Similarly, the character start byte offsets are returned using a single vector +/// `char_to_byte_starts`, with strings expanded in row-major order. +/// +/// The `row_splits` tensor indicates where the codepoints and start offsets for +/// each input string begin and end within the `char_values` and +/// `char_to_byte_starts` tensors. In particular, the values for the `i`th +/// string (in row-major order) are stored in the slice +/// `[row_splits[i]:row_splits[i+1]]`. Thus: +/// +/// * `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th +/// character in the `i`th string (in row-major order). +/// * `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th +/// character in the `i`th string (in row-major order). +/// * `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th +/// string (in row-major order). +/// +/// - Parameter input: The text to be decoded. Can have any shape. Note that the output is flattened +/// to a vector of char values. +/// +/// - Attrs: +/// - input_encoding: Text encoding of the input strings. This is any of the encodings supported +/// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +/// - errors: Error handling policy when there is invalid formatting found in the input. +/// The value of 'strict' will cause the operation to produce a InvalidArgument +/// error on any invalid input formatting. A value of 'replace' (the default) will +/// cause the operation to replace any invalid formatting in the input with the +/// `replacement_char` codepoint. A value of 'ignore' will cause the operation to +/// skip any invalid formatting in the input and produce no corresponding output +/// character. +/// - replacement_char: The replacement character codepoint to be used in place of any invalid +/// formatting in the input when `errors='replace'`. Any valid unicode codepoint may +/// be used. The default value is the default unicode replacement character is +/// 0xFFFD or U+65533.) +/// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the +/// `replacement_char`. Default is false. +/// +/// - Outputs: +/// - row_splits: A 1D int32 tensor containing the row splits. +/// - char_values: A 1D int32 Tensor containing the decoded codepoints. +/// - char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input string where each +/// character in `char_values` starts. 
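For orientation, a hedged round-trip sketch using the `unicodeDecode` wrapper above together with the `unicodeEncode` wrapper documented further down (the `unicodeDecodeWithOffsets` wrapper itself follows next). It assumes the decode and encode split tensors share the same element type, as the round trip requires, and it writes the UTF-8 case of `OutputEncoding` as `.utf8`, which is an assumption about how the generator exposes that case; the `StringTensor` initializer is likewise assumed.

```swift
import TensorFlow

// Decode a batch of strings into codepoints, then re-encode them.
let text = StringTensor(["Hello", "World"])                 // assumed init from [String]
let decoded = Raw.unicodeDecode(text, inputEncoding: "UTF-8")
// decoded.rowSplits marks where each string's codepoints begin and end
// within decoded.charValues, as described in the docstring above.
let rebuilt = Raw.unicodeEncode(
    inputValues: decoded.charValues,
    inputSplits: decoded.rowSplits,
    outputEncoding: .utf8)                                  // case name assumed
// rebuilt should again contain ["Hello", "World"].
```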
+@inlinable @inline(__always) +public static func unicodeDecodeWithOffsets( + _ input: StringTensor, + inputEncoding: String, + errors: Errors = .b'replace', + replacementChar: Int64 = 65533, + replaceControlCharacters: Bool = false +) -> (rowSplits: Tensor, charValues: Tensor, charToByteStarts: Tensor) { + let ret: (TensorHandle, TensorHandle, TensorHandle) = #tfop("UnicodeDecodeWithOffsets", + input, + input_encoding: inputEncoding, + errors: errors.cName, + replacement_char: replacementChar, + replace_control_characters: replaceControlCharacters) + return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2)) +} + +/// Encode a tensor of ints into unicode strings. +/// +/// Returns a vector of strings, where `output[i]` is constructed by encoding the +/// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]` +/// using `output_encoding`. +/// +/// --- +/// +/// Example: +/// +/// ``` +/// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100] +/// input_splits = [0, 5, 10] +/// output_encoding = 'UTF-8' +/// +/// output = ['Hello', 'World'] +/// ``` +/// +/// - Parameters: +/// - input_values: A 1D tensor containing the unicode codepoints that should be encoded. +/// - input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings. +/// In particular, `output[i]` is constructed by encoding the codepoints in the +/// slice `input_values[input_splits[i]:input_splits[i+1]]`. +/// +/// - Attrs: +/// - errors: Error handling policy when there is invalid formatting found in the input. +/// The value of 'strict' will cause the operation to produce a InvalidArgument +/// error on any invalid input formatting. A value of 'replace' (the default) will +/// cause the operation to replace any invalid formatting in the input with the +/// `replacement_char` codepoint. A value of 'ignore' will cause the operation to +/// skip any invalid formatting in the input and produce no corresponding output +/// character. +/// - output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8", +/// "UTF-16-BE", and "UTF-32-BE"`. +/// - replacement_char: The replacement character codepoint to be used in place of any invalid +/// formatting in the input when `errors='replace'`. Any valid unicode codepoint may +/// be used. The default value is the default unicode replacement character is +/// 0xFFFD (U+65533). +/// +/// - Output output: The 1-D Tensor of strings encoded from the provided unicode codepoints. +@inlinable @inline(__always) +public static func unicodeEncode( + inputValues: Tensor, + inputSplits: Tensor, + errors: Errors = .b'replace', + outputEncoding: OutputEncoding, + replacementChar: Int64 = 65533 +) -> StringTensor { + let ret: TensorHandle = #tfop("UnicodeEncode", + inputValues, + inputSplits, + errors: errors.cName, + output_encoding: outputEncoding.cName, + replacement_char: replacementChar) + return StringTensor(handle: ret) +} + /// Determine the script codes of a given tensor of Unicode integer code points. /// /// This operation converts Unicode code points to script codes corresponding to @@ -22472,6 +22837,79 @@ public static func unicodeScript( return Tensor(handle: ret) } +/// Transcode the input text from a source encoding to a destination encoding. +/// +/// The input is a string tensor of any shape. The output is a string tensor of +/// the same shape containing the transcoded strings. Output strings are always +/// valid unicode. 
If the input contains invalid encoding positions, the +/// `errors` attribute sets the policy for how to deal with them. If the default +/// error-handling policy is used, invalid formatting will be substituted in the +/// output by the `replacement_char`. If the errors policy is to `ignore`, any +/// invalid encoding positions in the input are skipped and not included in the +/// output. If it set to `strict` then any invalid formatting will result in an +/// InvalidArgument error. +/// +/// This operation can be used with `output_encoding = input_encoding` to enforce +/// correct formatting for inputs even if they are already in the desired encoding. +/// +/// If the input is prefixed by a Byte Order Mark needed to determine encoding +/// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that +/// BOM will be consumed and not emitted into the output. If the input encoding +/// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is +/// interpreted as a non-breaking-space and is preserved in the output (including +/// always for UTF-8). +/// +/// The end result is that if the input is marked as an explicit endianness the +/// transcoding is faithful to all codepoints in the source. If it is not marked +/// with an explicit endianness, the BOM is not considered part of the string itself +/// but as metadata, and so is not preserved in the output. +/// +/// - Parameter input: The text to be processed. Can have any shape. +/// +/// - Attrs: +/// - input_encoding: Text encoding of the input strings. This is any of the encodings supported +/// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. +/// - output_encoding: The unicode encoding to use in the output. Must be one of +/// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. +/// - errors: Error handling policy when there is invalid formatting found in the input. +/// The value of 'strict' will cause the operation to produce a InvalidArgument +/// error on any invalid input formatting. A value of 'replace' (the default) will +/// cause the operation to replace any invalid formatting in the input with the +/// `replacement_char` codepoint. A value of 'ignore' will cause the operation to +/// skip any invalid formatting in the input and produce no corresponding output +/// character. +/// - replacement_char: The replacement character codepoint to be used in place of any invalid +/// formatting in the input when `errors='replace'`. Any valid unicode codepoint may +/// be used. The default value is the default unicode replacement character is +/// 0xFFFD or U+65533.) +/// +/// Note that for UTF-8, passing a replacement character expressible in 1 byte, such +/// as ' ', will preserve string alignment to the source since invalid bytes will be +/// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte +/// replacement character will preserve byte alignment to the source. +/// - replace_control_characters: Whether to replace the C0 control characters (00-1F) with the +/// `replacement_char`. Default is false. +/// +/// - Output output: A string tensor containing unicode text encoded using `output_encoding`. 
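The note above about using `output_encoding = input_encoding` to enforce correct formatting translates directly into a call to the wrapper that follows. This is a hedged sketch, not part of the generated file: the `StringTensor` initializer and the `.utf8` spelling of the `OutputEncoding` case are assumptions.

```swift
import TensorFlow

// Re-encoding UTF-8 as UTF-8 only enforces well-formedness: under the default
// errors policy ("replace"), invalid byte sequences become U+FFFD.
let rawText = StringTensor(["caf\u{00E9}", "tensor"])   // assumed init from [String]
let sanitized = Raw.unicodeTranscode(
    rawText,
    inputEncoding: "UTF-8",
    outputEncoding: .utf8)                              // case name assumed
```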
+@inlinable @inline(__always) +public static func unicodeTranscode( + _ input: StringTensor, + inputEncoding: String, + outputEncoding: OutputEncoding, + errors: Errors = .b'replace', + replacementChar: Int64 = 65533, + replaceControlCharacters: Bool = false +) -> StringTensor { + let ret: TensorHandle = #tfop("UnicodeTranscode", + input, + input_encoding: inputEncoding, + output_encoding: outputEncoding.cName, + errors: errors.cName, + replacement_char: replacementChar, + replace_control_characters: replaceControlCharacters) + return StringTensor(handle: ret) +} + /// Generates labels for candidate sampling with a uniform distribution. /// /// See explanations of candidate sampling and the data formats at @@ -23089,26 +23527,20 @@ public static func where_( return Tensor(handle: ret) } -/// A Reader that outputs the entire contents of a file as a value. +/// Worker heartbeat op. /// -/// To use, enqueue filenames in a Queue. The output of ReaderRead will -/// be a filename (key) and the contents of that file (value). +/// Heartbeats may be sent periodically to indicate the coordinator is still active, +/// to retrieve the current worker status and to expedite shutdown when necessary. /// -/// - Attrs: -/// - container: If non-empty, this reader is placed in the given container. -/// Otherwise, a default container is used. -/// - shared_name: If non-empty, this reader is named in the given bucket -/// with this shared_name. Otherwise, the node name is used instead. +/// - Parameter request: A string tensor containing a serialized WorkerHeartbeatRequest /// -/// - Output reader_handle: The handle to reference the Reader. +/// - Output response: A string tensor containing a serialized WorkerHeartbeatResponse @inlinable @inline(__always) -public static func wholeFileReader( - container: String, - sharedName: String +public static func workerHeartbeat( + request: StringTensor ) -> StringTensor { - let ret: TensorHandle = #tfop("WholeFileReader", - container: container, - shared_name: sharedName) + let ret: TensorHandle = #tfop("WorkerHeartbeat", + request) return StringTensor(handle: ret) } @@ -23142,282 +23574,6 @@ public static func xdivy( return Tensor(handle: ret) } -/// Helper operator for performing XLA-style broadcasts -/// -/// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to -/// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules -/// for binary operators. -/// -/// - Parameters: -/// - lhs: the LHS input tensor -/// - rhs: the RHS input tensor -/// - broadcast_dims: an XLA-style broadcast dimension specification -/// -/// - Outputs: -/// - lhs_output: the broadcasted LHS tensor -/// - rhs_output: the broadcasted RHS tensor -@inlinable @inline(__always) -public static func xlaBroadcastHelper( - lhs: Tensor, - rhs: Tensor, - broadcastDims: Tensor -) -> (lhsOutput: Tensor, rhsOutput: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("XlaBroadcastHelper", - lhs, - rhs, - broadcastDims, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - -/// Operator that connects the output of an XLA computation to other consumer graph nodes. 
-@inlinable @inline(__always) -public static func xlaClusterOutput( - _ input: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("XlaClusterOutput", - input, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Wraps the XLA ConvGeneralDilated operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution -/// . -/// -/// - Parameters: -/// - lhs: the input tensor -/// - rhs: the kernel tensor -/// - window_strides: the inter-window strides -/// - padding: the padding to apply at the start and end of each input dimensions -/// - lhs_dilation: dilation to apply between input elements -/// - rhs_dilation: dilation to apply between kernel elements -/// - feature_group_count: number of feature groups for grouped convolution. -/// -/// - Attrs: -/// - dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto. -/// - precision_config: a serialized xla::PrecisionConfig proto. -@inlinable @inline(__always) -public static func xlaConv( - lhs: Tensor, - rhs: Tensor, - windowStrides: Tensor, - padding: Tensor, - lhsDilation: Tensor, - rhsDilation: Tensor, - featureGroupCount: Tensor, - dimensionNumbers: String, - precisionConfig: String -) -> Tensor { - let ret: TensorHandle = #tfop("XlaConv", - lhs, - rhs, - windowStrides, - padding, - lhsDilation, - rhsDilation, - featureGroupCount, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType, - dimension_numbers: dimensionNumbers, - precision_config: precisionConfig) - return Tensor(handle: ret) -} - -/// Wraps the XLA ConvGeneralDilated operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral -/// . -/// -/// - Parameters: -/// - lhs: the LHS tensor -/// - rhs: the RHS tensor -/// -/// - Attrs: -/// - dimension_numbers: a serialized xla::DotDimensionNumbers proto. -/// - precision_config: a serialized xla::PrecisionConfig proto. -@inlinable @inline(__always) -public static func xlaDot( - lhs: Tensor, - rhs: Tensor, - dimensionNumbers: String, - precisionConfig: String -) -> Tensor { - let ret: TensorHandle = #tfop("XlaDot", - lhs, - rhs, - T$dtype: T.tensorFlowDataType, - dimension_numbers: dimensionNumbers, - precision_config: precisionConfig) - return Tensor(handle: ret) -} - -/// Wraps the XLA DynamicSlice operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice -/// . -/// -/// DynamicSlice extracts a sub-array from the input array at dynamic -/// start_indices. The size of the slice in each dimension is passed in -/// size_indices, which specify the end point of exclusive slice intervals in each -/// dimension -- [start, start + size). The shape of start_indices must have rank 1, -/// with dimension size equal to the rank of operand. -/// -/// - Parameters: -/// - input: A `Tensor` of type T. -/// - start_indices: List of N integers containing the slice size for each -/// dimension. Each value must be strictly greater than zero, and start + size -/// must be less than or equal to the size of the dimension to avoid -/// implementation defined behavior. 
-@inlinable @inline(__always) -public static func xlaDynamicSlice( - _ input: Tensor, - startIndices: Tensor, - sizeIndices: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("XlaDynamicSlice", - input, - startIndices, - sizeIndices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Wraps the XLA DynamicUpdateSlice operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice -/// . -/// -/// XlaDynamicUpdateSlice generates a result which is the value of the `input` -/// operand, with a slice update overwritten at `indices`. The shape of `update` -/// determines the shape of the sub-array of the result which is updated. The shape -/// of indices must be rank == 1, with dimension size equal to the rank of `input`. -/// -/// Handling of out-of-bounds slice indices is implementation-defined. -/// -/// - Parameters: -/// - input: A `Tensor` of type T. -/// - update: A `Tensor` of type T. Same rank as `input`. -/// - indices: A vector of indices into `input`. Must have length equal to the rank of -/// `input`. -/// -/// - Output output: A `Tensor` of type T. -@inlinable @inline(__always) -public static func xlaDynamicUpdateSlice( - _ input: Tensor, - update: Tensor, - indices: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("XlaDynamicUpdateSlice", - input, - update, - indices, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Wraps the XLA Sort operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#sort -/// . -/// -/// Sorts a tensor. Currently only sorts in ascending order are supported. -/// -/// - Parameters: -/// - keys: A `Tensor` of type K. -/// - values: A `Tensor` of type V. -/// -/// - Outputs: -/// - sorted_keys: A `Tensor` of type K. -/// - sorted_values: A `Tensor` of type V. -@inlinable @inline(__always) -public static func xlaKeyValueSort( - keys: Tensor, - _ values: Tensor -) -> (sortedKeys: Tensor, sortedValues: Tensor) { - let ret: (TensorHandle, TensorHandle) = #tfop("XlaKeyValueSort", - keys, - values, - K$dtype: K.tensorFlowDataType, - V$dtype: V.tensorFlowDataType) - return (Tensor(handle: ret.0), Tensor(handle: ret.1)) -} - -/// Wraps the XLA Pad operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#pad -/// . -/// -/// - Parameters: -/// - input: A `Tensor` of type T. -/// - padding_value: A scalar `Tensor` of type T. -/// - padding_low: the padding to apply at the start of each input dimensions -/// - padding_high: the padding to apply at the end of each input dimension. -/// - padding_interior: the padding to apply between each input element. -/// -/// - Output output: A `Tensor` of type T. -@inlinable @inline(__always) -public static func xlaPad( - _ input: Tensor, - paddingValue: Tensor, - paddingLow: Tensor, - paddingHigh: Tensor, - paddingInterior: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("XlaPad", - input, - paddingValue, - paddingLow, - paddingHigh, - paddingInterior, - T$dtype: T.tensorFlowDataType, - Tindices$dtype: Tindices.tensorFlowDataType) - return Tensor(handle: ret) -} - -/// Sends the named tensor to another XLA computation. Wraps the XLA Send operator -/// -/// documented at -/// https://www.tensorflow.org/performance/xla/operation_semantics#send . -/// -/// - Parameter tensor: The tensor to send. 
-/// -/// - Attr tensor_name: A string key that identifies the channel. -@inlinable @inline(__always) -public static func xlaSend( - _ tensor: Tensor, - tensorName: String -) { - return #tfop("XlaSend", - tensor, - T$dtype: T.tensorFlowDataType, - tensor_name: tensorName) -} - -/// Wraps the XLA Sort operator, documented at -/// -/// https://www.tensorflow.org/performance/xla/operation_semantics#sort -/// . -/// -/// Sorts a tensor. Currently only sorts in ascending order are supported. -/// -/// - Parameter input: A `Tensor` of type T. -/// -/// - Output output: A `Tensor` of type T. -@inlinable @inline(__always) -public static func xlaSort( - _ input: Tensor -) -> Tensor { - let ret: TensorHandle = #tfop("XlaSort", - input, - T$dtype: T.tensorFlowDataType) - return Tensor(handle: ret) -} - /// Returns 0 if x == 0, and x * log(y) otherwise, elementwise. @inlinable @inline(__always) public static func xlogy( diff --git a/generate_wrappers.py b/generate_wrappers.py index 9dd3042..8499a8f 100644 --- a/generate_wrappers.py +++ b/generate_wrappers.py @@ -19,6 +19,7 @@ import collections import os +import six import tensorflow as tf from tensorflow.core.framework import types_pb2 @@ -176,8 +177,9 @@ def __init__(self): def enum_codes(self): """Generates the swift code for enums.""" codes = [] - entries = list(self._entries.iteritems()) + entries = list(six.iteritems(self._entries)) for allowed_values, type_name in sorted(entries, key=lambda x: x[1]): + allowed_values = [str(a, encoding='utf-8') for a in allowed_values] codes.append( # FIXME: Readd `@_frozen` after SR-9739 is resolved. # https://bugs.swift.org/browse/SR-9739 @@ -434,7 +436,7 @@ def generate_code(op, api_def, enum_store): tfop_args = ',\n '.join( ['"' + op.name + '"'] + [name for name, _ in input_names_and_types] + - filter(None, [t.op_arg() for t in types]) + + list(filter(None, [t.op_arg() for t in types])) + [a.tfop_name + ': ' + a.swift_value for a in attributes_as_input] ) @@ -528,7 +530,14 @@ def main(argv): try: if op_name[0] == '_': continue op = api_def_map.get_op_def(op_name) - api_def = api_def_map.get_api_def(bytes(op_name)) + + if any(a.is_ref for a in op.input_arg): + raise UnableToGenerateCodeError('has ref-valued input') + + if any(a.is_ref for a in op.output_arg): + raise UnableToGenerateCodeError('has ref-valued output') + + api_def = api_def_map.get_api_def(bytes(op_name, 'utf8')) op_codes.append(generate_code(op, api_def, enum_store)) except UnableToGenerateCodeError as e: print('Cannot generate code for %s: %s' % (op.name, e.details))