diff --git a/stdlib/public/TensorFlow/Ops.swift b/stdlib/public/TensorFlow/Ops.swift
index c4bd54b0b6175..9f542deb7018b 100644
--- a/stdlib/public/TensorFlow/Ops.swift
+++ b/stdlib/public/TensorFlow/Ops.swift
@@ -1719,56 +1719,297 @@ public extension Tensor where Scalar : Numeric {
 // Indexing and slicing
 //===----------------------------------------------------------------------===//
 
+// TODO: Negative indexing and strides syntax.
+
 public extension Tensor {
-  /// Access the element tensor specified by an index in the leading dimension.
-  /// - Parameter index: Index of the element tensor.
+  /// Extracts a slice from the tensor defined by lower and upper bounds for
+  /// each dimension.
+  ///
+  /// - Parameter lowerBounds: The lower bounds at each dimension.
+  /// - Parameter upperBounds: The upper bounds at each dimension.
+  @inlinable
+  @differentiable(wrt: self)
+  func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor {
+    // TODO: Precondition `lowerBounds.count == upperBounds.count`,
+    // preferably in graph.
+    // TODO: Differentiating control flow is not supported yet, thus the thunks.
+    let lowerBoundsTensor = Tensor<Int32>({lowerBounds.map(Int32.init)}())
+    let upperBoundsTensor = Tensor<Int32>({upperBounds.map(Int32.init)}())
+    return slice(
+      lowerBounds: lowerBoundsTensor,
+      sizes: upperBoundsTensor - lowerBoundsTensor)
+  }
+
   @inlinable
-  subscript(index: Int) -> Tensor {
+  @differentiable(wrt: self, vjp: _vjpSlice)
+  func slice(lowerBounds: Tensor<Int32>, sizes: Tensor<Int32>) -> Tensor {
+    return Raw.slice(self, begin: lowerBounds, size: sizes)
+  }
+
+  @inlinable
+  internal func _vjpSlice(
+    lowerBounds: Tensor<Int32>,
+    sizes: Tensor<Int32>
+  ) -> (Tensor, (Tensor) -> Tensor) {
+    let value = slice(lowerBounds: lowerBounds, sizes: sizes)
+    let afterPaddings = shapeTensor - value.shapeTensor - lowerBounds
+    return (value, { [after = afterPaddings] v in
+      let beforePaddings = lowerBounds.expandingShape(at: 1)
+      let afterPaddings = after.expandingShape(at: 1)
+      let paddings = Tensor<Int32>(
+        concatenating: [beforePaddings, afterPaddings], alongAxis: 1)
+      return Raw.pad(v, paddings: paddings)
+    })
+  }
+}
+
+public enum TensorRange : TensorRangeExpression {
+  case ellipsis
+  case newAxis
+  case squeezeAxis
+  case index(Int)
+  case range(Range<Int>, stride: Int)
+  case closedRange(ClosedRange<Int>, stride: Int)
+  case partialRangeFrom(PartialRangeFrom<Int>, stride: Int)
+  case partialRangeUpTo(PartialRangeUpTo<Int>, stride: Int)
+  case partialRangeThrough(PartialRangeThrough<Int>, stride: Int)
+
+  public var tensorRange: TensorRange { return self }
+}
+
+extension TensorRange : Equatable {
+  public static func == (lhs: TensorRange, rhs: TensorRange) -> Bool {
+    switch (lhs, rhs) {
+    case (.ellipsis, .ellipsis),
+         (.newAxis, .newAxis),
+         (.squeezeAxis, .squeezeAxis):
+      return true
+    case (let .index(i1), let .index(i2)): return i1 == i2
+    case (let .range(r1, s1), let .range(r2, s2)): return r1 == r2 && s1 == s2
+    case (let .closedRange(r1, s1), let .closedRange(r2, s2)):
+      return r1 == r2 && s1 == s2
+    case (let .partialRangeFrom(r1, s1), let .partialRangeFrom(r2, s2)):
+      return r1.lowerBound == r2.lowerBound && s1 == s2
+    case (let .partialRangeUpTo(r1, s1), let .partialRangeUpTo(r2, s2)):
+      return r1.upperBound == r2.upperBound && s1 == s2
+    case (let .partialRangeThrough(r1, s1), let .partialRangeThrough(r2, s2)):
+      return r1.upperBound == r2.upperBound && s1 == s2
+    default: return false
+    }
+  }
+}
+
+public protocol TensorRangeExpression {
+  var tensorRange: TensorRange { get }
+}
+
+// TODO: Cannot extend non-nominal type 'UnboundedRange'.
+// extension UnboundedRange : TensorRangeExpression {
+//   public var tensorRange: TensorRange { return .ellipsis }
+// }
+
+extension Int : TensorRangeExpression {
+  public var tensorRange: TensorRange { return .index(self) }
+}
+
+extension Range : TensorRangeExpression where Bound == Int {
+  public var tensorRange: TensorRange {
+    return .range(self, stride: 1)
+  }
+}
+
+extension ClosedRange : TensorRangeExpression where Bound == Int {
+  public var tensorRange: TensorRange {
+    return .closedRange(self, stride: 1)
+  }
+}
+
+extension PartialRangeFrom : TensorRangeExpression where Bound == Int {
+  public var tensorRange: TensorRange {
+    return .partialRangeFrom(self, stride: 1)
+  }
+}
+
+extension PartialRangeUpTo : TensorRangeExpression where Bound == Int {
+  public var tensorRange: TensorRange {
+    return .partialRangeUpTo(self, stride: 1)
+  }
+}
+
+extension PartialRangeThrough : TensorRangeExpression where Bound == Int {
+  public var tensorRange: TensorRange {
+    return .partialRangeThrough(self, stride: 1)
+  }
+}
+
+infix operator .. : StridedRangeFormationPrecedence
+precedencegroup StridedRangeFormationPrecedence {
+  associativity: left
+  higherThan: CastingPrecedence
+  lowerThan: RangeFormationPrecedence
+}
+
+public extension Range where Bound == Int {
+  static func .. (range: Range, stride: Int) -> TensorRange {
+    return .range(range, stride: stride)
+  }
+}
+
+public extension ClosedRange where Bound == Int {
+  static func .. (range: ClosedRange, stride: Int) -> TensorRange {
+    return .closedRange(range, stride: stride)
+  }
+}
+
+public extension PartialRangeFrom where Bound == Int {
+  static func .. (range: PartialRangeFrom, stride: Int) -> TensorRange {
+    return .partialRangeFrom(range, stride: stride)
+  }
+}
+
+public extension PartialRangeUpTo where Bound == Int {
+  static func .. (range: PartialRangeUpTo, stride: Int) -> TensorRange {
+    return .partialRangeUpTo(range, stride: stride)
+  }
+}
+
+public extension PartialRangeThrough where Bound == Int {
+  static func .. (range: PartialRangeThrough, stride: Int) -> TensorRange {
+    return .partialRangeThrough(range, stride: stride)
+  }
+}
+
+public extension Tensor {
+  @_fixed_layout @usableFromInline
+  internal struct IndexPath {
+    @usableFromInline
+    let begin, end, strides: Tensor<Int32>
+
+    @usableFromInline
+    let beginMask, endMask, ellipsisMask, newAxisMask, squeezeAxisMask: Int64
+
+    @inlinable
+    public init(
+      begin: Tensor<Int32>, end: Tensor<Int32>, strides: Tensor<Int32>,
+      beginMask: Int64, endMask: Int64, ellipsisMask: Int64, newAxisMask: Int64,
+      squeezeAxisMask: Int64
+    ) {
+      self.begin = begin
+      self.end = end
+      self.strides = strides
+      self.beginMask = beginMask
+      self.endMask = endMask
+      self.ellipsisMask = ellipsisMask
+      self.newAxisMask = newAxisMask
+      self.squeezeAxisMask = squeezeAxisMask
+    }
+  }
+
+  @inlinable
+  @differentiable(wrt: self, vjp: _vjpSubscript)
+  internal subscript(_ indexPath: IndexPath) -> Tensor {
     get {
-      let index = Int32(index)
-      let slice = Raw.stridedSlice(
-        self, begin: Tensor([index]), end: Tensor([index + 1]),
-        strides: Tensor([1]))
-      return slice.squeezingShape(at: 0)
+      return Raw.stridedSlice(
+        self, begin: indexPath.begin, end: indexPath.end,
+        strides: indexPath.strides, beginMask: indexPath.beginMask,
+        endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask,
+        newAxisMask: indexPath.newAxisMask,
+        shrinkAxisMask: indexPath.squeezeAxisMask)
     }
     set {
-      let leftElements = self[0..(0))
+      self = Raw.tensorStridedSliceUpdate(
+        self, begin: indexPath.begin, end: indexPath.end,
+        strides: indexPath.strides, value: newValue,
+        beginMask: indexPath.beginMask, endMask: indexPath.endMask,
+        ellipsisMask: indexPath.ellipsisMask,
+        newAxisMask: indexPath.newAxisMask,
+        shrinkAxisMask: indexPath.squeezeAxisMask)
     }
   }
 
-  /// Access the subtensor specified by a contiguous range of indices.
-  /// - Parameter bounds: Contiguous range of indices.
   @inlinable
-  subscript(bounds: Range<Int>) -> Tensor {
-    return Raw.stridedSlice(
-      self,
-      begin: Tensor([Int32(bounds.lowerBound)]),
-      end: Tensor([Int32(bounds.upperBound)]),
-      strides: Tensor([1]))
+  // TODO: @differentiable(wrt: self)
+  subscript(_ ranges: TensorRangeExpression...) -> Tensor {
+    get {
+      return self[IndexPath(ranges.map { $0.tensorRange })]
+    }
+    set {
+      self[IndexPath(ranges.map { $0.tensorRange })] = newValue
+    }
+  }
+
+  @usableFromInline
+  internal func _vjpSubscript(
+    _ indexPath: IndexPath
+  ) -> (Tensor, (Tensor) -> Tensor) {
+    return (self[indexPath], { [shape = shapeTensor] v in
+      Raw.stridedSliceGrad(
+        shape: shape, begin: indexPath.begin, end: indexPath.end,
+        strides: indexPath.strides, dy: v, beginMask: indexPath.beginMask,
+        endMask: indexPath.endMask, ellipsisMask: indexPath.ellipsisMask,
+        newAxisMask: indexPath.newAxisMask,
+        shrinkAxisMask: indexPath.squeezeAxisMask)
+    })
   }
+}
-
-  // TODO(danielzheng): Add strided slices? (increment by something different
-  // than 1)
-  // Ideas for strided slice API: it could be another subscript method, or it
-  // be a top level `stride` function like Swift's `stride(from:to:by:)`.
 
+internal extension Tensor.IndexPath {
+  @inlinable
+  init(_ ranges: [TensorRange]) {
+    precondition(!ranges.isEmpty, "The tensor range collection cannot be empty.")
+    precondition(ranges.filter { $0 == TensorRange.ellipsis }.count < 2,
+                 "Only one ellipsis is allowed per tensor range collection.")
+
+    var begin = [Int32](repeating: 0, count: ranges.count)
+    var end = [Int32](repeating: 0, count: ranges.count)
+    var strides = [Int32](repeating: 1, count: ranges.count)
+    var beginMask: Int64 = 0
+    var endMask: Int64 = 0
+    var ellipsisMask: Int64 = 0
+    var newAxisMask: Int64 = 0
+    var squeezeAxisMask: Int64 = 0
+    for (i, index) in ranges.enumerated() {
+      switch index {
+      case .ellipsis: ellipsisMask |= 1 << i
+      case .newAxis: newAxisMask |= 1 << i
+      case .squeezeAxis: squeezeAxisMask |= 1 << i
+      case .index(let index):
+        begin[i] = Int32(index)
+        end[i] = Int32(index) + 1
+        squeezeAxisMask |= 1 << i
+      case .range(let range, let stride):
+        begin[i] = Int32(range.lowerBound)
+        end[i] = Int32(range.upperBound)
+        strides[i] = Int32(stride)
+      case .closedRange(let range, let stride):
+        begin[i] = Int32(range.lowerBound)
+        switch Int32(range.upperBound) {
+        case -1: endMask |= 1 << i
+        case let u: end[i] = u + 1
+        }
+        strides[i] = Int32(stride)
+      case .partialRangeFrom(let range, let stride):
+        begin[i] = Int32(range.lowerBound)
+        strides[i] = Int32(stride)
+        endMask |= 1 << i
+      case .partialRangeUpTo(let range, let stride):
+        end[i] = Int32(range.upperBound)
+        strides[i] = Int32(stride)
+        beginMask |= 1 << i
+      case .partialRangeThrough(let range, let stride):
+        end[i] = Int32(range.upperBound) + 1
+        strides[i] = Int32(stride)
+        beginMask |= 1 << i
+      }
+    }
-  /// Extracts a slice from the tensor defined by lower and upper bounds for
-  /// each dimension.
-  ///
-  /// - Parameter lowerBounds: The lower bounds at each dimension.
-  /// - Parameter upperBounds: The upper bounds at each dimension.
-  @inlinable @inline(__always)
-  func slice(lowerBounds: [Int], upperBounds: [Int]) -> Tensor {
-    /// TODO: Precondition `lowerBounds.count == upperBounds.count`,
-    /// preferably in graph.
-    let lowerBoundsTensor = Tensor(lowerBounds.map(Int32.init))
-    let upperBoundsTensor = Tensor(upperBounds.map(Int32.init))
-    return Raw.slice(
-      self,
-      begin: lowerBoundsTensor,
-      size: upperBoundsTensor - lowerBoundsTensor)
+    self.begin = Tensor(begin)
+    self.end = Tensor(end)
+    self.strides = Tensor(strides)
+    self.beginMask = beginMask
+    self.endMask = endMask
+    self.ellipsisMask = ellipsisMask
+    self.newAxisMask = newAxisMask
+    self.squeezeAxisMask = squeezeAxisMask
   }
 }
diff --git a/test/TensorFlowRuntime/tensor.swift b/test/TensorFlowRuntime/tensor.swift
index 98903fbb5ddf2..e67a2c3364d8f 100644
--- a/test/TensorFlowRuntime/tensor.swift
+++ b/test/TensorFlowRuntime/tensor.swift
@@ -105,11 +105,6 @@ TensorTests.testAllBackends("BoolToNumericCast_NonTPU") {
 }
 
 TensorTests.testAllBackends("ElementIndexing") {
-  // XLA compilation error under TPU.
-  if _RuntimeConfig.executionMode.isTPU { return }
-
-  // NOTE: This tests the `subscript(index:)` method, which is distinct from
-  // the `subscript(indices:)` method.
   // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly
   // until send and receive are implemented (without writing a bunch of mini
   // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy
@@ -135,17 +130,119 @@ TensorTests.testAllBackends("ElementIndexing") {
   expectEqual([43], array0D.scalars)
 }
 
-TensorTests.testAllBackends("SliceIndexing") {
-  // XLA compilation error under TPU.
-  if _RuntimeConfig.executionMode.isTPU { return }
+TensorTests.testAllBackends("ElementIndexingAssignment") {
+  // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly
+  // until send and receive are implemented (without writing a bunch of mini
+  // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy
+  // and the ShapedArray is tested.
+  var tensor3D = Tensor<Float>(shape: [3, 4, 5],
+                               scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  tensor3D[2] = Tensor<Float>(shape: [4, 5],
+                              scalars: Array(stride(from: 20.0, to: 40, by: 1)))
+  let element2D = tensor3D[2]
+  let element1D = tensor3D[1][3]
+  let element0D = tensor3D[2][0][3]
+
+  let array2D = element2D.array
+  let array1D = element1D.array
+  let array0D = element0D.array
+
+  /// Test shapes
+  expectEqual([4, 5], array2D.shape)
+  expectEqual([5], array1D.shape)
+  expectEqual([], array0D.shape)
+  /// Test scalars
+  expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars)
+  expectEqual([23], array0D.scalars)
+}
+
+TensorTests.testAllBackends("NestedElementIndexing") {
+  // NOTE: This test could use a clearer name, along with other "indexing"
+  // tests. Note to update corresponding test names in other files
+  // (shaped_array.test) as well.
+  let tensor3D = Tensor<Float>(shape: [3, 4, 5],
+                               scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  let element1D = tensor3D[1, 3]
+  let element0D = tensor3D[2, 0, 3]
+
+  let array1D = element1D.array
+  let array0D = element0D.array
+
+  /// Test shapes
+  expectEqual([5], array1D.shape)
+  expectEqual([], array0D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 35.0, to: 40, by: 1)), array1D.scalars)
+  expectEqual([43], array0D.scalars)
+}
+
+TensorTests.testAllBackends("SliceIndexing") {
   // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
   // and receive are implemented (without writing a bunch of mini tests).
   // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
   // ShapedArray is tested instead.
   let tensor3D = Tensor<Float>(shape: [3, 4, 5],
                                scalars: Array(stride(from: 0.0, to: 60, by: 1)))
-  let slice3D = tensor3D[1..<2]
+  let slice3D = tensor3D[2...]
+  let slice2D = tensor3D[1][0..<2]
+  let slice1D = tensor3D[0][0][3..<5]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([1, 4, 5], array3D.shape)
+  expectEqual([2, 5], array2D.shape)
+  expectEqual([2], array1D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars)
+  expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
+}
+
+TensorTests.testAllBackends("SliceIndexingAssignment") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  var tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  tensor3D[2, 0..<5, 0..<6] = Tensor<Float>(
+    shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1)))
+  let slice3D = tensor3D[2...]
+  let slice2D = tensor3D[1][0..<2]
+  let slice1D = tensor3D[0][0][3..<5]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([1, 4, 5], array3D.shape)
+  expectEqual([2, 5], array2D.shape)
+  expectEqual([2], array1D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 20.0, to: 40, by: 1)), array3D.scalars)
+  expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
+}
+
+TensorTests.testAllBackends("EllipsisIndexing") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  var tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  tensor3D[2, TensorRange.ellipsis] = Tensor<Float>(
+    shape: [4, 5], scalars: Array(stride(from: 20.0, to: 40, by: 1)))
+  let slice3D = tensor3D[2..., TensorRange.ellipsis]
   let slice2D = tensor3D[1][0..<2]
   let slice1D = tensor3D[0][0][3..<5]
 
@@ -164,6 +261,124 @@ TensorTests.testAllBackends("SliceIndexing") {
   expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
 }
 
+TensorTests.testAllBackends("NewAxisIndexing") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  let tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  let newAxis = TensorRange.newAxis
+  let ellipsis = TensorRange.ellipsis
+  let slice3D = tensor3D[2..., newAxis, ellipsis]
+  let slice2D = tensor3D[1, newAxis][0..<1, 0..<2]
+  let slice1D = tensor3D[0][newAxis, 0][0..<1, 3..<5, newAxis]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([1, 1, 4, 5], array3D.shape)
+  expectEqual([1, 2, 5], array2D.shape)
+  expectEqual([1, 2, 1], array1D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars)
+  expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
+}
+
+TensorTests.testAllBackends("SqueezeAxisIndexing") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  let tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  let newAxis = TensorRange.newAxis
+  let ellipsis = TensorRange.ellipsis
+  let squeezeAxis = TensorRange.squeezeAxis
+  let slice3D = tensor3D[2..., newAxis, ellipsis][squeezeAxis, squeezeAxis]
+  let slice2D = tensor3D[1, newAxis][squeezeAxis, 0..<2]
+  let slice1D = tensor3D[0..<1, 0, 3..<5, newAxis][
+    squeezeAxis, ellipsis, squeezeAxis]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([4, 5], array3D.shape)
+  expectEqual([2, 5], array2D.shape)
+  expectEqual([2], array1D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars)
+  expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
+}
+
+TensorTests.testAllBackends("StridedSliceIndexing") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  let tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  let slice3D = tensor3D[2...]
+  let slice2D = tensor3D[1][0..<3..2]
+  let slice1D = tensor3D[0][0][1..<5..2]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([1, 4, 5], array3D.shape)
+  expectEqual([2, 5], array2D.shape)
+  expectEqual([2], array1D.shape)
+
+  /// Test scalars
+  expectEqual(Array(stride(from: 40.0, to: 60, by: 1)), array3D.scalars)
+  expectEqual(
+    Array(stride(from: 20.0, to: 25, by: 1)) +
+    Array(stride(from: 30.0, to: 35, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 1.0, to: 5, by: 2)), array1D.scalars)
+}
+
+TensorTests.testAllBackends("StridedSliceIndexingAssignment") {
+  // NOTE: cannot test `Tensor.shape` or `Tensor.scalars` directly until send
+  // and receive are implemented (without writing a bunch of mini tests).
+  // Instead, `Tensor.array` is called to make a ShapedArray host copy and the
+  // ShapedArray is tested instead.
+  var tensor3D = Tensor<Float>(
+    shape: [3, 4, 5], scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  tensor3D[2, 0..<5..2, 0..<6] = Tensor<Float>(
+    shape: [2, 5], scalars: Array(stride(from: 20.0, to: 40, by: 2)))
+  let slice3D = tensor3D[2...]
+  let slice2D = tensor3D[1][0..<2]
+  let slice1D = tensor3D[0][0][3..<5]
+
+  let array3D = slice3D.array
+  let array2D = slice2D.array
+  let array1D = slice1D.array
+
+  /// Test shapes
+  expectEqual([1, 4, 5], array3D.shape)
+  expectEqual([2, 5], array2D.shape)
+  expectEqual([2], array1D.shape)
+
+  /// Test scalars
+  expectEqual(
+    Array(stride(from: 20.0, to: 30, by: 2)) +
+    Array(stride(from: 45.0, to: 50, by: 1)) +
+    Array(stride(from: 30.0, to: 40, by: 2)) +
+    Array(stride(from: 55.0, to: 60, by: 1)), array3D.scalars)
+  expectEqual(Array(stride(from: 20.0, to: 30, by: 1)), array2D.scalars)
+  expectEqual(Array(stride(from: 3.0, to: 5, by: 1)), array1D.scalars)
+}
+
 TensorTests.test("WholeTensorSlicing") {
   let t: Tensor<Int32> = [[[1, 1, 1], [2, 2, 2]],
                           [[3, 3, 3], [4, 4, 4]],
@@ -173,6 +388,23 @@ TensorTests.test("WholeTensorSlicing") {
               slice2.array)
 }
 
+TensorTests.testAllBackends("AdvancedIndexing") {
+  // NOTE: cannot test multiple `Tensor.shape` or `Tensor.scalars` directly
+  // until send and receive are implemented (without writing a bunch of mini
+  // tests). Instead, `Tensor.array` is called to make a ShapedArray host copy
+  // and the ShapedArray is tested.
+  let tensor3D = Tensor<Float>(shape: [3, 4, 5],
+                               scalars: Array(stride(from: 0.0, to: 60, by: 1)))
+  let element2D = tensor3D[1..<3, 0, 3...]
+  let array2D = element2D.array
+
+  // Test shape
+  expectEqual([2, 2], array2D.shape)
+
+  // Test scalars
+  expectEqual(Array([23.0, 24.0, 43.0, 44.0]), array2D.scalars)
+}
+
 TensorTests.testAllBackends("Reduction") {
   // TODO(b/111815968): triage and fix this TPU issue
   #if !TPU
diff --git a/utils/update_checkout/update-checkout-config.json b/utils/update_checkout/update-checkout-config.json
index f3f838203d174..a0822f53b83ed 100644
--- a/utils/update_checkout/update-checkout-config.json
+++ b/utils/update_checkout/update-checkout-config.json
@@ -229,7 +229,7 @@
         "lldb": "bdb96e8b352f7cb18e2b3b66f2d3f75d92f81dcd",
         "cmark": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
         "llbuild": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
-        "swiftpm": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
+        "swiftpm": "f7deeaac96dc42475e15d045bb093436c98f89ee",
         "swift-syntax": "1aeb642da66a23a66c9ac80d74813e1a4b963999",
         "swift-stress-tester": "2fc093642df924f6adf9de9e4397c7c6fc8b5fc8",
         "compiler-rt": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
@@ -240,7 +240,7 @@
         "swift-xcode-playground-support": "swift-DEVELOPMENT-SNAPSHOT-2018-11-26-a",
         "ninja": "253e94c1fa511704baeb61cf69995bbf09ba435e",
         "icu": "release-61-1",
-        "tensorflow": "d1db9860a24af2ce64626fe4c3bee69f83700afa",
+        "tensorflow": "447e512d332ab86172a3b13119900b4d021d0c65",
         "tensorflow-swift-bindings": "a7ccb727514414d31df9e403f34fa923bdf6a519",
        "tensorflow-swift-apis": "23c16ae33a3826399b01caeb1b0b736531d00bde"
 }
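
For reviewers, a minimal usage sketch of the indexing API introduced by this patch. It is not part of the diff; the shapes and scalar values in the comments mirror the new tests in tensor.swift, and it assumes a Swift for TensorFlow toolchain with this change applied.

import TensorFlow

let x = Tensor<Float>(shape: [3, 4, 5],
                      scalars: Array(stride(from: 0.0, to: 60, by: 1)))

// Element and multi-axis indexing (see "ElementIndexing" / "AdvancedIndexing").
let a = x[2]               // shape [4, 5]
let b = x[1..<3, 0, 3...]  // shape [2, 2], scalars [23, 24, 43, 44]

// New-axis and ellipsis markers (see "NewAxisIndexing").
let c = x[2..., TensorRange.newAxis, TensorRange.ellipsis]  // shape [1, 1, 4, 5]

// Strided ranges via the `..` operator (see "StridedSliceIndexing").
let d = x[0][0][1..<5..2]  // shape [2], scalars [1, 3]

// Bounds-based slicing (see "WholeTensorSlicing").
let e = x.slice(lowerBounds: [1, 0, 0], upperBounds: [2, 1, 3])  // shape [1, 1, 3]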