From 486e0548f76bbc975a449ea6fea6f225d1357116 Mon Sep 17 00:00:00 2001
From: Brett Koonce
Date: Wed, 19 Jun 2019 16:06:30 -0500
Subject: [PATCH] transformer: upstream api changes

---
 Transformer/Model.swift     | 10 +++++-----
 Transformer/Operators.swift |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/Transformer/Model.swift b/Transformer/Model.swift
index 3fcf71ccd50..70633007606 100644
--- a/Transformer/Model.swift
+++ b/Transformer/Model.swift
@@ -51,9 +51,9 @@ struct FeedForward: Layer {
 }
 
 struct AttentionInput: Differentiable {
-    let query: Tensor
-    let key: Tensor
-    let value: Tensor
+    var query: Tensor
+    var key: Tensor
+    var value: Tensor
 }
 
 @differentiable(wrt: (query, key, value), vjp: _vjpMakeAttentionInput)
@@ -69,8 +69,8 @@ func _vjpMakeAttentionInput(query: Tensor, key: Tensor, value: Ten
 }
 
 struct AttentionContext: Differentiable {
-    let key: Tensor
-    let value: Tensor
+    var key: Tensor
+    var value: Tensor
 }
 
 @differentiable(wrt: (key, value), vjp: _vjpMakeAttentionContext)
diff --git a/Transformer/Operators.swift b/Transformer/Operators.swift
index ed16aa215ac..20944d45bd9 100644
--- a/Transformer/Operators.swift
+++ b/Transformer/Operators.swift
@@ -29,7 +29,7 @@ func gelu(_ x: Tensor) -> Tensor(
     _ left: Tensor,
@@ -41,7 +41,7 @@ func batchedMatmul(
 }
 
 @usableFromInline
-func _vjpBatchedMatmul(
+func _vjpBatchedMatmul(
     _ left: Tensor,
     _ right: Tensor,
     adjointLeft: Bool,
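
Note (not part of the patch, added for context): a minimal sketch of the Model.swift change above, assuming a Swift for TensorFlow toolchain of this era whose synthesized Differentiable conformance expects mutable stored properties, which is consistent with the let -> var edits. The Tensor<Float> element type, the toy gradient call, and the field values below are illustrative assumptions, not code from the repository.

import TensorFlow

// Assumed sketch: a Differentiable struct whose stored properties are
// mutable (var), mirroring the patched AttentionInput fields; the patch
// switches them from let to var to match the upstream synthesis requirements.
struct AttentionInput: Differentiable {
    var query: Tensor<Float>
    var key: Tensor<Float>
    var value: Tensor<Float>
}

// Hypothetical usage: differentiate a toy scalar function of the struct;
// gradients flow through all three stored properties.
let input = AttentionInput(
    query: Tensor<Float>(ones: [2, 2]),
    key: Tensor<Float>(ones: [2, 2]),
    value: Tensor<Float>(ones: [2, 2]))
let grads = gradient(at: input) { (a: AttentionInput) -> Tensor<Float> in
    (a.query * a.key + a.value).sum()
}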