Add transpose to ttir_builder (#2214)
Closes #1961
Closes #1759

### Problem description
`transpose` was not exposed through `ttir_builder`, which prevented modelling the llama attention layer.
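
For illustration, here is a minimal sketch (not part of this commit) of the kind of pattern this unblocks, written in the style of the golden tests. The test name, the two-input test signature, the two-operand `matmul` call, and the shapes are assumptions based on the surrounding code, not anything added by this change:

```python
# Hypothetical sketch: an attention-style score computation using the newly
# exposed transpose together with the existing matmul and softmax builder ops.
@compile_to_flatbuffer(
    [
        (32, 64),  # query
        (32, 64),  # key
    ],
    targets=["ttnn"],
)
def test_attention_scores(q: Operand, k: Operand, builder: TTIRBuilder):
    k_t = builder.transpose(k, dim0=0, dim1=1)   # (32, 64) -> (64, 32)
    scores = builder.matmul(q, k_t)              # (32, 64) @ (64, 32) -> (32, 32)
    return builder.softmax(scores, dimension=1)  # normalize scores along dim 1
```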

### Special Thanks
To @vprajapati-tt for inadvertently fixing a blocking memory issue that was exposed by this bug (see #2136).
ctodTT authored and vwellsTT committed Feb 20, 2025
1 parent 6de020e commit fb9b38c
Showing 2 changed files with 21 additions and 0 deletions.
11 changes: 11 additions & 0 deletions python/test_infra/ttir_builder.py
@@ -684,6 +684,17 @@ def matmul(
            organize_ttir_args=lambda i, o, shape: (self._get_type(o), i[0], i[1], o),
        )

    def transpose(self, in0: Operand, dim0: int = 0, dim1: int = 1) -> OpView:
        kwargs = {"dim0": dim0, "dim1": dim1}
        return self.op_proxy(
            torch.transpose,
            ttir.TransposeOp,
            [in0],
            golden_kwargs=kwargs,
            ttir_kwargs=kwargs,
            organize_ttir_args=lambda i, o, _: (self._get_type(o), i[0], o),
        )

    def softmax(self, in0: Operand, dimension: int = 1) -> OpView:
        return self.op_proxy(
            torch.softmax,
10 changes: 10 additions & 0 deletions test/python/golden/test_ttir_ops.py
@@ -409,6 +409,16 @@ def test_reshape(in0: Operand, builder: TTIRBuilder):
    return builder.reshape(in0, [2048])


@compile_to_flatbuffer(
    [
        (32, 64),
    ],
    targets=["ttnn"],
)
def test_transpose(in0: Operand, builder: TTIRBuilder):
    return builder.transpose(in0)


# @compile_to_flatbuffer(
# [
# (64, 64),
