[mlir][sparse] Print new syntax #68130

Merged Oct 4, 2023 (7 commits; the diff below shows changes from 5 commits).
20 changes: 10 additions & 10 deletions mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -215,29 +215,29 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
  case DimLevelType::Compressed:
    return "compressed";
  case DimLevelType::CompressedNu:
-    return "compressed_nu";
+    return "compressed(nonunique)";
  case DimLevelType::CompressedNo:
-    return "compressed_no";
+    return "compressed(nonordered)";
  case DimLevelType::CompressedNuNo:
-    return "compressed_nu_no";
+    return "compressed(nonunique, nonordered)";
  case DimLevelType::Singleton:
    return "singleton";
  case DimLevelType::SingletonNu:
-    return "singleton_nu";
+    return "singleton(nonunique)";
  case DimLevelType::SingletonNo:
-    return "singleton_no";
+    return "singleton(nonordered)";
  case DimLevelType::SingletonNuNo:
-    return "singleton_nu_no";
+    return "singleton(nonunique, nonordered)";
  case DimLevelType::LooseCompressed:
    return "loose_compressed";
  case DimLevelType::LooseCompressedNu:
-    return "loose_compressed_nu";
+    return "loose_compressed(nonunique)";
  case DimLevelType::LooseCompressedNo:
-    return "loose_compressed_no";
+    return "loose_compressed(nonordered)";
  case DimLevelType::LooseCompressedNuNo:
-    return "loose_compressed_nu_no";
+    return "loose_compressed(nonunique, nonordered)";
  case DimLevelType::TwoOutOfFour:
-    return "compressed24";
+    return "block2_4";
  }
  return "";
}
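For illustration, the new strings compose with the map-based encoding syntax used throughout the updated tests below. For example, a sorted-COO encoding now spells its level types inline in the map instead of as quoted strings in an lvlTypes array (this exact form appears in roundtrip_encoding.mlir later in this diff):

#SortedCOO = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>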
@@ -422,6 +422,14 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
    std::optional<uint64_t> getStaticLvlSliceOffset(::mlir::sparse_tensor::Level lvl) const;
    std::optional<uint64_t> getStaticLvlSliceSize(::mlir::sparse_tensor::Level lvl) const;
    std::optional<uint64_t> getStaticLvlSliceStride(::mlir::sparse_tensor::Level lvl) const;
+
+   //
+   // Printing methods.
+   //
+
+   void printSymbol(AffineMap &map, AsmPrinter &printer) const;
+   void printDimension(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
+   void printLevel(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::DimLevelType> lvlTypes) const;
  }];

  let genVerifyDecl = 1;
72 changes: 54 additions & 18 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -586,31 +586,67 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
}

void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
-  // Print the struct-like storage in dictionary fashion.
-  printer << "<{ lvlTypes = [ ";
-  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
-    printer << "\"" << toMLIRString(dlt) << "\"";
-  });
-  printer << " ]";
+  auto map = static_cast<AffineMap>(getDimToLvl());
+  // Empty affine map indicates identity map
+  if (!map) {
+    map = AffineMap::getMultiDimIdentityMap(getLvlTypes().size(), getContext());
+  }
+  printer << "<{ map = ";
+  printSymbol(map, printer);
+  printer << '(';
+  printDimension(map, printer, getDimSlices());
+  printer << ") -> (";
+  printLevel(map, printer, getLvlTypes());
+  printer << ')';
  // Print remaining members only for non-default values.
-  if (!isIdentity())
-    printer << ", dimToLvl = affine_map<" << getDimToLvl() << ">";
  if (getPosWidth())
    printer << ", posWidth = " << getPosWidth();
  if (getCrdWidth())
    printer << ", crdWidth = " << getCrdWidth();
-  if (!getDimSlices().empty()) {
-    printer << ", dimSlices = [ ";
-    llvm::interleaveComma(getDimSlices(), printer,
-                          [&](SparseTensorDimSliceAttr attr) {
-                            // Calls SparseTensorDimSliceAttr::print directly to
-                            // skip mnemonic.
-                            attr.print(printer);
-                          });
-    printer << " ]";
-  }
-
-  printer << " }>";
+  printer << " }>";
}

+void SparseTensorEncodingAttr::printSymbol(AffineMap &map,
+                                           AsmPrinter &printer) const {
+  if (map.getNumSymbols() != 0) {
+    printer << '[';
+    for (unsigned i = 0; i < map.getNumSymbols() - 1; ++i)
+      printer << 's' << i << ", ";
+    if (map.getNumSymbols() >= 1)
+      printer << 's' << map.getNumSymbols() - 1;
+    printer << ']';
+  }
+}
+
+void SparseTensorEncodingAttr::printDimension(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<SparseTensorDimSliceAttr> dimSlices) const {
+  if (!dimSlices.empty()) {
+    for (unsigned i = 0; i < map.getNumDims() - 1; ++i)
+      printer << 'd' << i << " : " << dimSlices[i] << ", ";
+    if (map.getNumDims() >= 1)
+      printer << 'd' << map.getNumDims() - 1 << " : "
+              << dimSlices[map.getNumDims() - 1];
+  } else {
+    for (unsigned i = 0; i < map.getNumDims() - 1; ++i)
+      printer << 'd' << i << ", ";
+    if (map.getNumDims() >= 1)
+      printer << 'd' << map.getNumDims() - 1;
+  }
+}
+
+void SparseTensorEncodingAttr::printLevel(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<DimLevelType> lvlTypes) const {
+  for (unsigned i = 0; i < map.getNumResults() - 1; ++i) {
+    map.getResult(i).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
+  }
+  if (map.getNumResults() >= 1) {
+    auto lastIndex = map.getNumResults() - 1;
+    map.getResult(lastIndex).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[lastIndex]);
+  }
+}

LogicalResult
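Taken together, the printed form is a single map: printSymbol emits an optional leading symbol list, printDimension the dimension list (with slice attributes when present), and printLevel each level expression with its inline level type. For example, the ELL encoding exercised in roundtrip_encoding.mlir below now round-trips as:

#ELL = #sparse_tensor.encoding<{
  map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed)
}>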
@@ -472,8 +472,17 @@ class SparseInsertGenerator
    llvm::raw_svector_ostream nameOstream(nameBuffer);
    nameOstream << kInsertFuncNamePrefix;
    const Level lvlRank = stt.getLvlRank();
-   for (Level l = 0; l < lvlRank; l++)
-     nameOstream << toMLIRString(stt.getLvlType(l)) << "_";
+   for (Level l = 0; l < lvlRank; l++) {
+     std::string lvlType = toMLIRString(stt.getLvlType(l));
+     // Replace/remove punctuations in level properties.
+     std::replace_if(
+         lvlType.begin(), lvlType.end(),
+         [](char c) { return c == '(' || c == ','; }, '_');
+     lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
+                                  [](char c) { return c == ')' || c == ' '; }),
+                   lvlType.end());
+     nameOstream << lvlType << "_";
+   }
    // Static dim sizes are used in the generated code while dynamic sizes are
    // loaded from the dimSizes buffer. This is the reason for adding the shape
    // to the function name.
Expand Down
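Because the new level-type strings contain parentheses, commas, and spaces, which cannot appear in generated function names, the loop above rewrites them before mangling. A minimal standalone sketch of the same rewrite (not part of the patch):

#include <algorithm>
#include <iostream>
#include <string>

int main() {
  std::string lvlType = "compressed(nonunique, nonordered)";
  // '(' and ',' become '_'; ')' and ' ' are dropped.
  std::replace_if(
      lvlType.begin(), lvlType.end(),
      [](char c) { return c == '(' || c == ','; }, '_');
  lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
                               [](char c) { return c == ')' || c == ' '; }),
                lvlType.end());
  std::cout << lvlType << "\n"; // prints: compressed_nonunique_nonordered
}

This is what turns "compressed(nonordered)" into the compressed_nonordered seen in the updated CHECK labels of codegen.mlir below; in those names, per the comment above, the mangled level types are followed by the static shape (8_8), the element type (f64), and what appear to be the default overhead widths (0_0).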
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -507,7 +507,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
return %1 : tensor<8x8xf64, #CSR>
}

-// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_nonordered_8_8_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
// CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
// CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_nonordered_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
// CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
// CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
// CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -611,7 +611,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
return %1 : tensor<128xf64, #SparseVector>
}

-// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_nonunique_singleton_5_6_f64_0_0(
// CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
// CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
// CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
// CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
// CHECK-SAME: %[[A4:.*4]]: index,
// CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nonunique_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
// CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
%0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
32 changes: 16 additions & 16 deletions mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -1,7 +1,7 @@
// RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s

// CHECK-LABEL: func private @sparse_1d_tensor(
-// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>)
+// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)

// -----
@@ -13,7 +13,7 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map
}>

// CHECK-LABEL: func private @sparse_csr(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], posWidth = 64, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>>)
func.func private @sparse_csr(tensor<?x?xf32, #CSR>)

// -----
@@ -23,7 +23,7 @@ func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
}>

// CHECK-LABEL: func private @CSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>
func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
return
}
@@ -37,7 +37,7 @@ func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
}>

// CHECK-LABEL: func private @sparse_csc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>>)
func.func private @sparse_csc(tensor<?x?xf32, #CSC>)

// -----
@@ -49,7 +49,7 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
}>

// CHECK-LABEL: func private @sparse_dcsc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : compressed, d0 : compressed), crdWidth = 64 }>>)
func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)

// -----
@@ -59,7 +59,7 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
}>

// CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered)) }>>)
func.func private @sparse_coo(tensor<?x?xf32, #COO>)

// -----
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
}>

// CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "loose_compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton) }>>)
func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)

// -----
@@ -79,7 +79,7 @@ func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
}>

// CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }>>)
func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)

// -----
@@ -94,7 +94,7 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
}>

// CHECK-LABEL: func private @sparse_bcsr(
-// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)


@@ -105,7 +105,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
}>

// CHECK-LABEL: func private @sparse_ell(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>>
func.func private @sparse_ell(tensor<?x?xf64, #ELL>)

// -----
@@ -115,7 +115,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
}>

// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

// -----
@@ -125,7 +125,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
}>

// CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

// -----
@@ -138,7 +138,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
}>

// CHECK-LABEL: func private @sparse_2_out_of_4(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed24" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : block2_4) }>>
func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)

// -----
@@ -153,7 +153,7 @@ func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
}>

// CHECK-LABEL: func private @BCSR(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
return
}
@@ -174,7 +174,7 @@ func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
}>

// CHECK-LABEL: func private @BCSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
return
}
@@ -190,7 +190,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
}>

// CHECK-LABEL: func private @NV_24(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed24" ], dimToLvl = affine_map<(d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 floordiv 4 : dense, d1 mod 4 : block2_4) }>>
func.func private @NV_24(%arg0: tensor<?x?xf64, #NV_24>) {
return
}
8 changes: 4 additions & 4 deletions mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -16,7 +16,7 @@
// CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -53,7 +53,7 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
// CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -99,7 +99,7 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
// CHECK-ROUND: return %[[E]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// CHECK-LABEL: func.func @dynamic_sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -142,7 +142,7 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<
// CHECK-ROUND: return %[[C]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// CHECK-LABEL: func.func @dynamic_sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -4,7 +4,7 @@
#SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>

// CHECK: func.func @sparse_reshape(
// CHECK-SAME: %[[S:.*]]:
// CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:
// CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
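A side note on the FileCheck change repeated across these reshape tests: the capture %[[S:[a-zA-Z0-9_]*]] restricts the match to the SSA value name, whereas the old greedy %[[S:.*]] can backtrack to the last ':' on the line, and the new map syntax introduces extra colons (d0 : compressed) into the printed type. A minimal sketch of the difference, assuming a hypothetical input line of this shape:

// Input:  func.func @sparse_reshape(%arg0: tensor<?x?xf64, #SparseMatrix>) {
// Old:    // CHECK-SAME: %[[S:.*]]:             (greedy; [[S]] may capture well past "arg0")
// New:    // CHECK-SAME: %[[S:[a-zA-Z0-9_]*]]:  ([[S]] captures exactly "arg0")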