diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
index bc351ec52c0946..2920ef79f461c6 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h
@@ -215,29 +215,29 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
   case DimLevelType::Compressed:
     return "compressed";
   case DimLevelType::CompressedNu:
-    return "compressed_nu";
+    return "compressed(nonunique)";
   case DimLevelType::CompressedNo:
-    return "compressed_no";
+    return "compressed(nonordered)";
   case DimLevelType::CompressedNuNo:
-    return "compressed_nu_no";
+    return "compressed(nonunique, nonordered)";
   case DimLevelType::Singleton:
     return "singleton";
   case DimLevelType::SingletonNu:
-    return "singleton_nu";
+    return "singleton(nonunique)";
   case DimLevelType::SingletonNo:
-    return "singleton_no";
+    return "singleton(nonordered)";
   case DimLevelType::SingletonNuNo:
-    return "singleton_nu_no";
+    return "singleton(nonunique, nonordered)";
   case DimLevelType::LooseCompressed:
     return "loose_compressed";
   case DimLevelType::LooseCompressedNu:
-    return "loose_compressed_nu";
+    return "loose_compressed(nonunique)";
   case DimLevelType::LooseCompressedNo:
-    return "loose_compressed_no";
+    return "loose_compressed(nonordered)";
   case DimLevelType::LooseCompressedNuNo:
-    return "loose_compressed_nu_no";
+    return "loose_compressed(nonunique, nonordered)";
   case DimLevelType::TwoOutOfFour:
-    return "compressed24";
+    return "block2_4";
   }
   return "";
 }
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index 4e38f314a27391..cacc8176c67824 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -422,6 +422,14 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     std::optional<uint64_t> getStaticLvlSliceOffset(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceSize(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceStride(::mlir::sparse_tensor::Level lvl) const;
+
+    //
+    // Printing methods.
+    //
+
+    void printSymbols(AffineMap &map, AsmPrinter &printer) const;
+    void printDimensions(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
+    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::DimLevelType> lvlTypes) const;
   }];

   let genVerifyDecl = 1;
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index af262cef6d1d5e..96ed5f13b9d9ec 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -586,31 +586,67 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 }

 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
-  // Print the struct-like storage in dictionary fashion.
- printer << "<{ lvlTypes = [ "; - llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) { - printer << "\"" << toMLIRString(dlt) << "\""; - }); - printer << " ]"; + auto map = static_cast(getDimToLvl()); + // Empty affine map indicates identity map + if (!map) + map = AffineMap::getMultiDimIdentityMap(getLvlTypes().size(), getContext()); + printer << "<{ map = "; + printSymbols(map, printer); + printer << '('; + printDimensions(map, printer, getDimSlices()); + printer << ") -> ("; + printLevels(map, printer, getLvlTypes()); + printer << ')'; // Print remaining members only for non-default values. - if (!isIdentity()) - printer << ", dimToLvl = affine_map<" << getDimToLvl() << ">"; if (getPosWidth()) printer << ", posWidth = " << getPosWidth(); if (getCrdWidth()) printer << ", crdWidth = " << getCrdWidth(); - if (!getDimSlices().empty()) { - printer << ", dimSlices = [ "; - llvm::interleaveComma(getDimSlices(), printer, - [&](SparseTensorDimSliceAttr attr) { - // Calls SparseTensorDimSliceAttr::print directly to - // skip mnemonic. - attr.print(printer); - }); - printer << " ]"; + printer << " }>"; +} + +void SparseTensorEncodingAttr::printSymbols(AffineMap &map, + AsmPrinter &printer) const { + if (map.getNumSymbols() == 0) + return; + printer << '['; + for (unsigned i = 0, n = map.getNumSymbols() - 1; i < n; i++) + printer << 's' << i << ", "; + if (map.getNumSymbols() >= 1) + printer << 's' << map.getNumSymbols() - 1; + printer << ']'; +} + +void SparseTensorEncodingAttr::printDimensions( + AffineMap &map, AsmPrinter &printer, + ArrayRef dimSlices) const { + if (!dimSlices.empty()) { + for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++) + printer << 'd' << i << " : " << dimSlices[i] << ", "; + if (map.getNumDims() >= 1) { + printer << 'd' << map.getNumDims() - 1 << " : " + << dimSlices[map.getNumDims() - 1]; + } + } else { + for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++) + printer << 'd' << i << ", "; + if (map.getNumDims() >= 1) + printer << 'd' << map.getNumDims() - 1; } +} - printer << " }>"; +void SparseTensorEncodingAttr::printLevels( + AffineMap &map, AsmPrinter &printer, + ArrayRef lvlTypes) const { + for (unsigned i = 0, n = map.getNumResults() - 1; i < n; i++) { + map.getResult(i).print(printer.getStream()); + printer << " : " << toMLIRString(lvlTypes[i]) << ", "; + } + if (map.getNumResults() >= 1) { + auto lastIndex = map.getNumResults() - 1; + map.getResult(lastIndex).print(printer.getStream()); + printer << " : " << toMLIRString(lvlTypes[lastIndex]); + } } LogicalResult diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp index 41b88fe4c9554c..7c362c086623b4 100644 --- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp +++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp @@ -472,8 +472,17 @@ class SparseInsertGenerator llvm::raw_svector_ostream nameOstream(nameBuffer); nameOstream << kInsertFuncNamePrefix; const Level lvlRank = stt.getLvlRank(); - for (Level l = 0; l < lvlRank; l++) - nameOstream << toMLIRString(stt.getLvlType(l)) << "_"; + for (Level l = 0; l < lvlRank; l++) { + std::string lvlType = toMLIRString(stt.getLvlType(l)); + // Replace/remove punctuations in level properties. 
+      std::replace_if(
+          lvlType.begin(), lvlType.end(),
+          [](char c) { return c == '(' || c == ','; }, '_');
+      lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
+                                   [](char c) { return c == ')' || c == ' '; }),
+                    lvlType.end());
+      nameOstream << lvlType << "_";
+    }
     // Static dim sizes are used in the generated code while dynamic sizes are
     // loaded from the dimSizes buffer. This is the reason for adding the shape
     // to the function name.
diff --git a/mlir/test/Dialect/SparseTensor/codegen.mlir b/mlir/test/Dialect/SparseTensor/codegen.mlir
index 9af5a46deadc9d..4ac768c21aff8f 100644
--- a/mlir/test/Dialect/SparseTensor/codegen.mlir
+++ b/mlir/test/Dialect/SparseTensor/codegen.mlir
@@ -507,7 +507,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
   return %1 : tensor<8x8xf64, #CSR>
 }

-// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_nonordered_8_8_f64_0_0(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
 // CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
 // CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_nonordered_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
 // CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
 // CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -611,7 +611,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
   return %1 : tensor<128xf64, #SparseVector>
 }

-// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_nonunique_singleton_5_6_f64_0_0(
 //  CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 //  CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 //  CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
 //  CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
 //  CHECK-SAME: %[[A4:.*4]]: index,
 //  CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nonunique_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
 // CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
 func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
   %0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
index 39e3ef10242352..c4ef50bee01ea2 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s

 // CHECK-LABEL: func private @sparse_1d_tensor(
-// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>)
+// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
 func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)

 // -----
@@ -13,7 +13,7 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map
 }>

 // CHECK-LABEL: func private @sparse_csr(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], posWidth = 64, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>>)
 func.func private @sparse_csr(tensor<?x?xf32, #CSR>)

 // -----
@@ -23,7 +23,7 @@ func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
 }>

 // CHECK-LABEL: func private @CSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>
 func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
   return
 }
@@ -37,7 +37,7 @@ func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
 }>

 // CHECK-LABEL: func private @sparse_csc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>>)
 func.func private @sparse_csc(tensor<?x?xf32, #CSC>)

 // -----
@@ -49,7 +49,7 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
 }>

 // CHECK-LABEL: func private @sparse_dcsc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : compressed, d0 : compressed), crdWidth = 64 }>>)
 func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)

 // -----
@@ -59,7 +59,7 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
 }>

 // CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered)) }>>)
 func.func private @sparse_coo(tensor<?x?xf32, #COO>)

 // -----
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
 }>

 // CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "loose_compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton) }>>)
 func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)

 // -----
@@ -79,7 +79,7 @@ func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
 }>

 // CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }>>)
 func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)

 // -----
@@ -94,7 +94,7 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
 }>

 // CHECK-LABEL: func private @sparse_bcsr(
-// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)


@@ -105,7 +105,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 }>

 // CHECK-LABEL: func private @sparse_ell(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>>
 func.func private @sparse_ell(tensor<?x?xf64, #ELL>)

 // -----
@@ -115,7 +115,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 }>

 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

 // -----
@@ -125,7 +125,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>

 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)

 // -----
@@ -138,7 +138,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>

 // CHECK-LABEL: func private @sparse_2_out_of_4(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed24" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : block2_4) }>>
 func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)

 // -----
@@ -153,7 +153,7 @@ func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
 }>

 // CHECK-LABEL: func private @BCSR(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
   return
 }
@@ -174,7 +174,7 @@ func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
 }>

 // CHECK-LABEL: func private @BCSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
   return
 }
@@ -190,7 +190,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
 }>

 // CHECK-LABEL: func private @NV_24(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed24" ], dimToLvl = affine_map<(d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 floordiv 4 : dense, d1 mod 4 : block2_4) }>>
 func.func private @NV_24(%arg0: tensor<?x?xf64, #NV_24>) {
   return
 }
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
index 7f8edac1530261..543fcaededf496 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -16,7 +16,7 @@
 // CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -53,7 +53,7 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
 // CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -99,7 +99,7 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
 // CHECK-ROUND: return %[[E]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -142,7 +142,7 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<
 // CHECK-ROUND: return %[[C]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
diff --git a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
index 9368cc71c5faa4..a1578eb20b8ba3 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir
@@ -4,7 +4,7 @@
 #SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>

 // CHECK: func.func @sparse_reshape(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
diff --git a/mlir/test/python/dialects/sparse_tensor/dialect.py b/mlir/test/python/dialects/sparse_tensor/dialect.py
index e1048edce184a5..d80b878323377a 100644
--- a/mlir/test/python/dialects/sparse_tensor/dialect.py
+++ b/mlir/test/python/dialects/sparse_tensor/dialect.py
@@ -21,7 +21,7 @@ def testEncodingAttr1D():
             "  crdWidth = 32"
             "}>"
         )
-        # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 16, crdWidth = 32 }>
+        # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 16, crdWidth = 32 }>
         print(parsed)

         casted = st.EncodingAttr(parsed)
@@ -38,7 +38,7 @@ def testEncodingAttr1D():
         print(f"crd_width: {casted.crd_width}")

         created = st.EncodingAttr.get(casted.lvl_types, None, 0, 0)
-        # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>
+        # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
         print(created)
         # CHECK: created_equal: False
         print(f"created_equal: {created == casted}")
@@ -61,7 +61,7 @@ def testEncodingAttr2D():
             "  crdWidth = 32"
             "}>"
         )
-        # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+        # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
         print(parsed)

         casted = st.EncodingAttr(parsed)
@@ -77,10 +77,8 @@ def testEncodingAttr2D():
         # CHECK: crd_width: 32
         print(f"crd_width: {casted.crd_width}")

-        created = st.EncodingAttr.get(
-            casted.lvl_types, casted.dim_to_lvl, 8, 32
-        )
-        # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, posWidth = 8, crdWidth = 32 }>
+        created = st.EncodingAttr.get(casted.lvl_types, casted.dim_to_lvl, 8, 32)
+        # CHECK: #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed), posWidth = 8, crdWidth = 32 }>
         print(created)
         # CHECK: created_equal: True
         print(f"created_equal: {created == casted}")
@@ -100,8 +98,8 @@ def testEncodingAttrOnTensorType():
             )
         )
         tt = RankedTensorType.get((1024,), F32Type.get(), encoding=encoding)
-        # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>>
+        # CHECK: tensor<1024xf32, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 64, crdWidth = 32 }>>
         print(tt)
-        # CHECK: #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ], posWidth = 64, crdWidth = 32 }>
+        # CHECK: #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed), posWidth = 64, crdWidth = 32 }>
         print(tt.encoding)
         assert tt.encoding == encoding