diff --git a/examples/lazy_jit/lazyjit.en.ipynb b/examples/eager_jit/eagerjit.en.ipynb similarity index 98% rename from examples/lazy_jit/lazyjit.en.ipynb rename to examples/eager_jit/eagerjit.en.ipynb index 9cb343a1e..6a2bf8453 100644 --- a/examples/lazy_jit/lazyjit.en.ipynb +++ b/examples/eager_jit/eagerjit.en.ipynb @@ -21,7 +21,7 @@ "id": "1ca2c56d", "metadata": {}, "source": [ - "# Tilelang Lazy JIT" + "# Tilelang Eager JIT" ] }, { @@ -37,7 +37,7 @@ "id": "b070c109", "metadata": {}, "source": [ - "Tilelang Lazy JIT merges JIT kernel generation and invocation into a single workflow.\n", + "Tilelang Eager JIT merges JIT kernel generation and invocation into a single workflow.\n", "\n", "The function signature looks similar to Triton, but we add many enhancements; the most important one is allowing rich Tensor annotations:\n", "\n", @@ -551,7 +551,7 @@ "id": "860a2972", "metadata": {}, "source": [ - "LazyJIT has very small overhead; each additional constant annotation costs about 200 ns.\n", + "EagerJIT has very small overhead; each additional constant annotation costs about 200 ns.\n", "* 200 ns is roughly the cost of an FFI call that reads parameters from a `torch.Tensor`'s shape/stride." ] }, @@ -618,7 +618,7 @@ "id": "8c6fbe08", "metadata": {}, "source": [ - "Both `lazyjit` and the original `jit` support parallel compilation.\n", + "Both EagerJIT and the original `jit` (i.e. LazyJIT) support parallel compilation.\n", "\n", "To avoid wasting memory on temporary `torch.Tensor` objects, you can use `T.Tensor` to create placeholders." 
] diff --git a/examples/lazy_jit/lazyjit.zh.ipynb b/examples/eager_jit/eagerjit.zh.ipynb similarity index 99% rename from examples/lazy_jit/lazyjit.zh.ipynb rename to examples/eager_jit/eagerjit.zh.ipynb index d7afafe69..0f7c9be99 100644 --- a/examples/lazy_jit/lazyjit.zh.ipynb +++ b/examples/eager_jit/eagerjit.zh.ipynb @@ -551,7 +551,7 @@ "id": "860a2972", "metadata": {}, "source": [ - "LazyJIT overhead 很小,每个 constant 添加约 200ns 的 overhead\n", + "EagerJIT overhead 很小,每个 constant 添加约 200ns 的 overhead\n", "* 200ns 大约是从 torch.Tensor 的 shape/stride 中拿参数的 ffi call 的代价" ] }, @@ -618,7 +618,7 @@ "id": "8c6fbe08", "metadata": {}, "source": [ - "lazyjit 和原来的 jit 都支持并行编译\n", + "EagerJIT 和原来的 jit(即 LazyJIT)都支持并行编译\n", "\n", "为了防止 torch.tensor 白白浪费内存,可以使用 T.Tensor 来创建 placeholder" ]