Integrate llvm-project at 9816fada1667ecc3343d285295c95848826693af (#8612)

* Reset third_party/llvm-project: 9816fada1667ecc3343d285295c95848826693af (2022-03-18 10:50:55 +0100): [bazel] Port a954ade8ed41
  * Additionally cherry-picked llvm/llvm-project@3b74aac to fix bufferization issues
* TensorFlow commit: c9ddfac348b1f423e1463ae78b046514a8b03a48
  * Additionally cherry-picked tensorflow/tensorflow@9642bbd to fix leftover debugging `dump()` calls
* MHLO commit: 467cd37703dc0c4195ce6351617ef320bb60e927
* Updated external model registration to the new `addExtension`/`attachInterface` API (a sketch of the new pattern follows this list)
* Updated FuncOp usages:
  * The attribute carrying the function's type information should now be `function_type` instead of `type`.
  * `mlir::FuncOp` is now `mlir::func::FuncOp`.
  * In assembly, `builtin.func` is now `func.func` (see the assembly sketch after this list).
* Updated MHLO attribute assembly for `comparison_direction`, `precision`, etc.
* Fixed TFL/StripMetadata.cpp to avoid dropping `tf_saved_model` attributes for ABI information
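
For reference, a minimal sketch of the external-model registration change, written against the post-integrate MLIR API. The wrapper function name registerExternalModels is hypothetical; ExtractSliceTiledOpInterface is the model class from the TiledOpInterface diff below.

#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"

// ExtractSliceTiledOpInterface is assumed to be the external-model class
// already defined in the file being patched (see the diff below).
void registerExternalModels(mlir::DialectRegistry &registry) {
  // Pre-integrate API, removed by this change:
  //   registry.addOpInterface<mlir::tensor::ExtractSliceOp,
  //                           ExtractSliceTiledOpInterface>();
  // Post-integrate API: the extension callback runs only once the tensor
  // dialect is actually loaded into a context, so registration no longer
  // forces the dialect to load eagerly.
  registry.addExtension(
      +[](mlir::MLIRContext *ctx, mlir::tensor::TensorDialect *dialect) {
        mlir::tensor::ExtractSliceOp::attachInterface<
            ExtractSliceTiledOpInterface>(*ctx);
      });
}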
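Similarly, a hedged before/after for the assembly-level renames. The @add function is a made-up example; only the builtin.func -> func.func spelling and the type -> function_type attribute key come from this change.

// Before this integrate:
builtin.func @add(%arg0: i32, %arg1: i32) -> i32 {
  %0 = arith.addi %arg0, %arg1 : i32
  return %0 : i32
}

// After: the op moved into the func dialect, and its generic form now
// carries {sym_name = "add", function_type = (i32, i32) -> i32} where it
// previously carried {sym_name = "add", type = (i32, i32) -> i32}.
func.func @add(%arg0: i32, %arg1: i32) -> i32 {
  %0 = arith.addi %arg0, %arg1 : i32
  return %0 : i32
}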
antiagainst authored Mar 23, 2022
1 parent 5e5cbd4 commit 9240f1c
Showing 207 changed files with 885 additions and 767 deletions.
2 changes: 1 addition & 1 deletion integrations/tensorflow/WORKSPACE
@@ -7,7 +7,7 @@

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

-TENSORFLOW_COMMIT = "05f17fca35623f4ab6d275ed95f0e1363c939f73"
+TENSORFLOW_COMMIT = "9d9e78f6735a933b42febaafe2f303eec25f70ee"

git_repository(
name = "org_tensorflow",
1 change: 1 addition & 0 deletions integrations/tensorflow/iree-dialects/BUILD
@@ -631,6 +631,7 @@ cc_library(
"@llvm-project//mlir:ArithmeticDialect",
"@llvm-project//mlir:BufferizationDialect",
"@llvm-project//mlir:BufferizationTransforms",
+"@llvm-project//mlir:FuncDialect",
"@llvm-project//mlir:LinalgOps",
"@llvm-project//mlir:LLVMDialect",
"@llvm-project//mlir:PDLDialect",
@@ -7,6 +7,7 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASS_DETAIL_H_

+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

namespace mlir {
@@ -7,16 +7,17 @@
#ifndef IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_
#define IREE_DIALECTS_DIALECT_LINALGEXT_TRANSFORMS_PASSES_H_

+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/Pass.h"

namespace mlir {
namespace iree_compiler {
namespace IREE {
namespace LinalgExt {

-std::unique_ptr<OperationPass<FuncOp>> createTiledOpInterfaceTilingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTiledOpInterfaceTilingPass();

-std::unique_ptr<OperationPass<FuncOp>> createLinalgExtToLoopsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createLinalgExtToLoopsPass();

std::unique_ptr<OperationPass<>> createPadContractionToBlockSizePass();

@@ -10,13 +10,13 @@
include "mlir/Pass/PassBase.td"

def LinalgExtToLoops :
-Pass<"iree-linalg-ext-to-loops", "FuncOp"> {
+Pass<"iree-linalg-ext-to-loops", "func::FuncOp"> {
let summary = "Convert LinalgExt ops to loops and Linalg ops.";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createLinalgExtToLoopsPass()";
}

def TiledOpInterfaceTiling :
-Pass<"iree-linalg-ext-tile", "FuncOp"> {
+Pass<"iree-linalg-ext-tile", "func::FuncOp"> {
let summary = "Test pass for tiling using TiledOpInterface";
let constructor = "mlir::iree_compiler::IREE::LinalgExt::createTiledOpInterfaceTilingPass()";
}
@@ -11,6 +11,7 @@
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "iree-dialects/Dialect/LinalgTransform/TrackingListener.h"
#include "iree-dialects/Dialect/LinalgTransform/TransformOpInterface.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/PDL/IR/PDLTypes.h"
#include "mlir/IR/BuiltinAttributes.h"
@@ -12,10 +12,12 @@
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
+#include "mlir/IR/FunctionInterfaces.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
+#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"

@@ -124,7 +124,7 @@ def IREEPyDM_FuncOp : IREEPyDM_Op<"func", [
}];

let arguments = (ins SymbolNameAttr:$sym_name,
-TypeAttr:$type,
+TypeAttr:$function_type,
OptionalAttr<StrArrayAttr>:$arg_names,
OptionalAttr<StrArrayAttr>:$free_vars,
OptionalAttr<StrArrayAttr>:$cell_vars,
@@ -143,21 +143,21 @@ def IREEPyDM_FuncOp : IREEPyDM_Op<"func", [
}

/// Returns the type of this function.
-FunctionType getType() {
+FunctionType getFunctionType() {
return getOperation()->getAttrOfType<TypeAttr>(getTypeAttrName())
.getValue()
.cast<FunctionType>();
}

/// Returns the argument types of this function.
-ArrayRef<Type> getArgumentTypes() { return getType().getInputs(); }
+ArrayRef<Type> getArgumentTypes() { return getFunctionType().getInputs(); }

/// Returns the result types of this function.
-ArrayRef<Type> getResultTypes() { return getType().getResults(); }
+ArrayRef<Type> getResultTypes() { return getFunctionType().getResults(); }

/// Returns the python return type of the function (second return type).
Type getPyReturnType() {
-return getType().getResult(1);
+return getFunctionType().getResult(1);
}

/// Hook for Trait::FunctionLike, called after verifying that the 'type'
@@ -167,7 +167,7 @@ def IREEPyDM_FuncOp : IREEPyDM_Op<"func", [

Region *getCallableRegion() { return &body(); }
ArrayRef<Type> getCallableResults() {
-return getType().getResults();
+return getFunctionType().getResults();
}

/// Defines SymbolOpInterface::isDeclaration().
@@ -177,8 +177,8 @@ def IREEPyDM_FuncOp : IREEPyDM_Op<"func", [
}];

let builders = [
-OpBuilder<(ins "StringAttr":$name, "FunctionType":$type), [{
-build($_builder, $_state, name, TypeAttr::get(type),
+OpBuilder<(ins "StringAttr":$name, "FunctionType":$function_type), [{
+build($_builder, $_state, name, TypeAttr::get(function_type),
nullptr, nullptr, nullptr, nullptr);
}]>
];
@@ -305,9 +305,12 @@ void IREE::LinalgExt::registerTiledOpInterfaceExternalModels(
DialectRegistry &registry) {
LLVM_DEBUG(
{ llvm::dbgs() << "Adding external models of tiled op interface\n"; });
-registry
-.addOpInterface<tensor::ExtractSliceOp, ExtractSliceTiledOpInterface>();
-registry.addOpInterface<tensor::InsertSliceOp, InsertSliceTiledOpInterface>();

+registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
+tensor::ExtractSliceOp::attachInterface<ExtractSliceTiledOpInterface>(*ctx);
+tensor::InsertSliceOp::attachInterface<InsertSliceTiledOpInterface>(*ctx);
+});

// TODO(ravishankarm): Needs custom PadTiledOpInterface or equiv.
// registry.addOpInterface<tensor::PadOp,
// ForwardToTilingInterface<tensor::PadOp>>();
@@ -6,11 +6,11 @@

#include "iree-dialects/Dialect/LinalgExt/LinalgExtBufferization.h"

-#include <mlir/IR/BuiltinOps.h>

#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtDialect.h"
#include "iree-dialects/Dialect/LinalgExt/IR/LinalgExtOps.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
+#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;
@@ -22,7 +22,6 @@ using bufferization::BufferizationState;
using bufferization::BufferRelation;
using bufferization::getMemRefType;
using bufferization::replaceOpWithBufferizedValues;
-using bufferization::replaceOpWithNewBufferizedOp;
using tensor::ExtractSliceOp;

/// Return the destinations that an InParallelOp is inserting into. One per
@@ -341,9 +340,12 @@ struct ParallelInsertSliceOpInterface

void mlir::iree_compiler::IREE::LinalgExt::
registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry) {
-registry.addOpInterface<InParallelOp, InParallelOpInterface>();
-registry
-.addOpInterface<PerformConcurrentlyOp, PerformConcurrentlyOpInterface>();
-registry
-.addOpInterface<ParallelInsertSliceOp, ParallelInsertSliceOpInterface>();
+registry.addExtension(
++[](MLIRContext *ctx, LinalgExt::IREELinalgExtDialect *dialect) {
+InParallelOp::attachInterface<InParallelOpInterface>(*ctx);
+PerformConcurrentlyOp::attachInterface<PerformConcurrentlyOpInterface>(
+*ctx);
+ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
+*ctx);
+});
}
@@ -157,23 +157,25 @@ struct LinalgOpTilingInterface
} // namespace

template <typename OpType>
-void registerOne(DialectRegistry &registry) {
-registry.addOpInterface<OpType, LinalgOpTilingInterface<OpType>>();
+void registerOne(MLIRContext *ctx) {
+OpType::template attachInterface<LinalgOpTilingInterface<OpType>>(*ctx);
}

/// Variadic helper function.
template <typename... OpTypes>
-void registerAll(DialectRegistry &registry) {
+void registerAll(MLIRContext *ctx) {
// FIXME: In c++17 this can be simplified by using 'fold expressions'.
-(void)std::initializer_list<int>{0, (registerOne<OpTypes>(registry), 0)...};
+(void)std::initializer_list<int>{0, (registerOne<OpTypes>(ctx), 0)...};
}

#define GET_OP_LIST

void mlir::iree_compiler::IREE::LinalgExt::
registerTilingInterfaceExternalModels(DialectRegistry &registry) {
-registerOne<linalg::GenericOp>(registry);
-registerAll<
+registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
+registerOne<linalg::GenericOp>(ctx);
+registerAll<
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
->(registry);
+>(ctx);
+});
}
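
As the FIXME in this hunk notes, C++17 lets the initializer_list expansion collapse into a fold expression. A sketch of that simplification (not part of this patch):

// C++17 fold-expression equivalent of the initializer_list expansion above.
template <typename... OpTypes>
void registerAll(mlir::MLIRContext *ctx) {
  (registerOne<OpTypes>(ctx), ...);
}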
@@ -31,6 +31,7 @@
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
+#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.h"
#include "mlir/Dialect/Linalg/Passes.h"
@@ -581,7 +582,7 @@ LogicalResult transform::BufferizeOp::apply(transform::TransformResults &result,

// Perform buffer-level hoistings.
state.getTopLevel()->walk(
-[&](FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
+[&](func::FuncOp funcOp) { hoistRedundantVectorTransfers(funcOp); });
return success();
}

@@ -597,8 +598,8 @@ transform::LowerToLLVMOp::apply(transform::TransformResults &result,
// the end. Keep module-level for now.
PassManager pm(getContext());

-pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
-pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
+pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
+pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
if (enable_async()) {
pm.addPass(createAsyncToAsyncRuntimePass());
pm.addPass(createAsyncRuntimeRefCountingPass());
@@ -618,7 +619,7 @@ transform::LowerToLLVMOp::apply(transform::TransformResults &result,
.enableAMX(enable_amx())
.enableX86Vector(enable_x86vector())));
// clang-format on
-pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
+pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
pm.addPass(createMemRefToLLVMPass());
if (enable_async())
pm.addPass(createConvertAsyncToLLVMPass());
@@ -631,7 +632,9 @@ transform::LowerToLLVMOp::apply(transform::TransformResults &result,
// FIXME: this is a terrible hack!
state.getTopLevel()->walk([](LLVM::LLVMFuncOp funcOp) {
for (int64_t i = 0; i < funcOp.getNumArguments(); ++i) {
-if (!funcOp.getType().getParamType(i).isa<LLVM::LLVMPointerType>())
+if (!funcOp.getFunctionType()
+.getParamType(i)
+.isa<LLVM::LLVMPointerType>())
continue;
funcOp.setArgAttr(i, "llvm.noalias", UnitAttr::get(funcOp.getContext()));
}
@@ -760,15 +763,15 @@ static scf::ExecuteRegionOp outlineInExecuteRegion(RewriterBase &b,
return executeRegionOp;
}

-static FailureOr<FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
-transform::TransformState &state) {
+static FailureOr<func::FuncOp> outlineLoop(scf::ForOp loop, StringRef funcName,
+transform::TransformState &state) {
PatternRewriterListener rewriter(loop->getContext());
auto &listener = state.getExtension<TrackingListener>();
rewriter.addListener(&listener);
Location loc = loop.getLoc();
scf::ExecuteRegionOp exec = outlineInExecuteRegion(rewriter, loop);
assert(exec && "failed to produce execute_region");
-FailureOr<FuncOp> outlined =
+FailureOr<func::FuncOp> outlined =
outlineSingleBlockRegion(rewriter, loc, exec.getRegion(), funcName);
if (failed(listener.checkErrorState()))
return failure();
@@ -781,7 +784,7 @@ transform::OutlineLoopOp::apply(transform::TransformResults &results,
SmallVector<Operation *> resultVector;
auto res =
applyTransformToEach(state.getPayloadOps(target()), resultVector,
-[&](scf::ForOp loop) -> FailureOr<FuncOp> {
+[&](scf::ForOp loop) -> FailureOr<func::FuncOp> {
return outlineLoop(loop, func_name(), state);
});
if (failed(res))
@@ -567,9 +567,7 @@ ParseResult PyFuncOp::parse(OpAsmParser &parser, OperationState &result) {
}

void PyFuncOp::print(OpAsmPrinter &p) {
-FunctionType fnType = getType();
-function_interface_impl::printFunctionOp(
-p, *this, fnType.getInputs(), /*isVariadic=*/false, fnType.getResults());
+function_interface_impl::printFunctionOp(p, *this, /*isVariadic=*/false);
}

//===----------------------------------------------------------------------===//
@@ -764,7 +762,7 @@ LogicalResult PyCallOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
<< "' does not reference a valid function";

// Verify that the operand and result types match the callee.
-auto fnType = fn.getType();
+auto fnType = fn.getFunctionType();
if (fnType.getNumInputs() != getNumOperands())
return emitOpError("incorrect number of operands for callee");

@@ -41,7 +41,7 @@ struct FixateWeakNumericPass

// Special cases for operations.
if (auto funcOp = llvm::dyn_cast<PYDM::FuncOp>(op)) {
-FunctionType existingFt = funcOp.getType();
+FunctionType existingFt = funcOp.getFunctionType();
FunctionType newFt = convertFunctionType(existingFt);
if (newFt != existingFt) {
funcOp.setType(newFt);
@@ -637,7 +637,7 @@ class FuncOpConversion : public OpConversionPattern<PYDM::FuncOp> {
LogicalResult
matchAndRewrite(PYDM::FuncOp srcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
-FunctionType srcFuncType = srcOp.getType();
+FunctionType srcFuncType = srcOp.getFunctionType();
TypeConverter::SignatureConversion signatureConversion(
srcOp.getNumArguments());

@@ -839,7 +839,7 @@ class RaiseOnFailureOpConversion
auto parentFunc = srcOp->getParentOfType<mlir::FuncOp>();
if (!parentFunc)
return rewriter.notifyMatchFailure(srcOp, "not contained by a func");
-Type convertedReturnType = parentFunc.getType().getResult(1);
+Type convertedReturnType = parentFunc.getFunctionType().getResult(1);

// Split the entry block.
Block *entryBlock = rewriter.getInsertionBlock();
@@ -18,7 +18,7 @@ def body(self):

@property
def type(self):
-return ir.FunctionType(ir.TypeAttr(self.attributes["type"]).value)
+return ir.FunctionType(ir.TypeAttr(self.attributes["function_type"]).value)

@property
def py_return_type(self) -> ir.Type:
@@ -85,7 +85,7 @@ def import_global_function(self,
context=ic.context)
f_op = d.FuncOp(
ir.StringAttr.get(symbol),
-type=ir.TypeAttr.get(ir_f_type),
+function_type=ir.TypeAttr.get(ir_f_type),
arg_names=f_arg_names,
free_vars=f_var_names,
cell_vars=ir.ArrayAttr.get([]),
@@ -233,7 +233,7 @@ func @nested_isolated() -> i32 {
%0 = arith.constant 1 : i32

// CHECK-NEXT: @nested_func
-builtin.func @nested_func() {
+func.func @nested_func() {
// CHECK-NEXT: arith.constant 1
%foo = arith.constant 1 : i32
"foo.yield"(%foo) : (i32) -> ()
2 changes: 1 addition & 1 deletion integrations/tensorflow/iree_tf_compiler/MHLO/Passes.cpp
@@ -25,7 +25,7 @@ void buildMHLOImportPassPipeline(OpPassManager &pm) {

// Import pipelines should end with canonicalization because they may have
// access to dialects and patterns that the core compiler does not.
-pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
+pm.addNestedPass<func::FuncOp>(mlir::createCanonicalizerPass());
}

void registerMHLOImportPassPipeline() {
[Diffs for the remaining changed files were not loaded on this page.]