@@ -104,14 +104,16 @@ outputs = method.execute([torch.randn(1, 3, 224, 224)])
104104
105105Module module ("model.pte");
106106auto tensor = make_tensor_ptr({2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});
107- auto outputs = module.forward({tensor});
107+ auto outputs = module.forward(tensor);
108108```
109109
110110**[Swift (iOS)](https://docs.pytorch.org/executorch/main/ios-section.html)**
111111```swift
112+ import ExecuTorch
113+
112114let module = Module(filePath: "model.pte")
113- let input = Tensor<Float>([1.0, 2.0, 3.0, 4.0])
114- let outputs: [Value] = try module.forward([input])
115+ let input = Tensor<Float>([1.0, 2.0, 3.0, 4.0], shape: [2, 2])
116+ let outputs = try module.forward(input)
115117```
116118
117119**[Kotlin (Android)](https://docs.pytorch.org/executorch/main/android-section.html)**
@@ -151,6 +153,8 @@ runner->generate("Hello, how are you?", config);
151153
152154**[Swift (iOS)](https://docs.pytorch.org/executorch/main/llm/run-on-ios.html)**
153155```swift
156+ import ExecuTorchLLM
157+
154158let runner = TextRunner(modelPath: "llama.pte", tokenizerPath: "tiktoken.bin")
155159try runner.generate("Hello, how are you?", Config {
156160 $0.sequenceLength = 128
0 commit comments