Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions llvm/include/llvm/CodeGen/GlobalISel/IRTranslator.h
Original file line number Diff line number Diff line change
Expand Up @@ -297,6 +297,10 @@ class IRTranslator : public MachineFunctionPass {
/// \pre \p U is a call instruction.
bool translateCall(const User &U, MachineIRBuilder &MIRBuilder);

bool translateIntrinsic(
const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo = nullptr);

/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
Expand Down
62 changes: 40 additions & 22 deletions llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2817,20 +2817,34 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;

TargetLowering::IntrinsicInfo Info;
bool IsTgtMemIntrinsic = TLI->getTgtMemIntrinsic(Info, CI, *MF, ID);

return translateIntrinsic(CI, ID, MIRBuilder,
IsTgtMemIntrinsic ? &Info : nullptr);
}

/// Translate a call to an intrinsic.
/// Depending on whether TLI->getTgtMemIntrinsic() is true, TgtMemIntrinsicInfo
/// is a pointer to the correspondingly populated IntrinsicInfo object.
/// Otherwise, this pointer is null.
bool IRTranslator::translateIntrinsic(
const CallBase &CB, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder,
const TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo) {
ArrayRef<Register> ResultRegs;
if (!CI.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CI);
if (!CB.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CB);

// Ignore the callsite attributes. Backend code is most likely not expecting
// an intrinsic to sometimes have side effects and sometimes not.
MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
if (isa<FPMathOperator>(CI))
MIB->copyIRFlags(CI);
if (isa<FPMathOperator>(CB))
MIB->copyIRFlags(CB);

for (const auto &Arg : enumerate(CI.args())) {
for (const auto &Arg : enumerate(CB.args())) {
// If this is required to be an immediate, don't materialize it in a
// register.
if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
if (CB.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
// imm arguments are more convenient than cimm (and realistically
// probably sufficient), so use them.
Expand Down Expand Up @@ -2859,29 +2873,33 @@ bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
}

// Add a MachineMemOperand if it is a target mem intrinsic.
TargetLowering::IntrinsicInfo Info;
// TODO: Add a GlobalISel version of getTgtMemIntrinsic.
if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
Align Alignment = Info.align.value_or(
DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
LLT MemTy = Info.memVT.isSimple()
? getLLTForMVT(Info.memVT.getSimpleVT())
: LLT::scalar(Info.memVT.getStoreSizeInBits());
if (TgtMemIntrinsicInfo) {
const Function *F = CB.getCalledFunction();

Align Alignment = TgtMemIntrinsicInfo->align.value_or(DL->getABITypeAlign(
TgtMemIntrinsicInfo->memVT.getTypeForEVT(F->getContext())));
LLT MemTy =
TgtMemIntrinsicInfo->memVT.isSimple()
? getLLTForMVT(TgtMemIntrinsicInfo->memVT.getSimpleVT())
: LLT::scalar(TgtMemIntrinsicInfo->memVT.getStoreSizeInBits());

// TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
// didn't yield anything useful.
MachinePointerInfo MPI;
if (Info.ptrVal)
MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
else if (Info.fallbackAddressSpace)
MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
if (TgtMemIntrinsicInfo->ptrVal) {
MPI = MachinePointerInfo(TgtMemIntrinsicInfo->ptrVal,
TgtMemIntrinsicInfo->offset);
} else if (TgtMemIntrinsicInfo->fallbackAddressSpace) {
MPI = MachinePointerInfo(*TgtMemIntrinsicInfo->fallbackAddressSpace);
}
MIB.addMemOperand(MF->getMachineMemOperand(
MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata(),
/*Ranges=*/nullptr, Info.ssid, Info.order, Info.failureOrder));
MPI, TgtMemIntrinsicInfo->flags, MemTy, Alignment, CB.getAAMetadata(),
/*Ranges=*/nullptr, TgtMemIntrinsicInfo->ssid,
TgtMemIntrinsicInfo->order, TgtMemIntrinsicInfo->failureOrder));
}

if (CI.isConvergent()) {
if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
if (CB.isConvergent()) {
if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
auto *Token = Bundle->Inputs[0].get();
Register TokenReg = getOrCreateVReg(*Token);
MIB.addUse(TokenReg, RegState::Implicit);
Expand Down
151 changes: 95 additions & 56 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3526,8 +3526,7 @@ void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {

// Update successor info.
addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
BasicBlock *Dest = I.getIndirectDest(i);
for (BasicBlock *Dest : I.getIndirectDests()) {
MachineBasicBlock *Target = FuncInfo.getMBB(Dest);
Target->setIsInlineAsmBrIndirectTarget();
// If we introduce a type of asm goto statement that is permitted to use an
Expand Down Expand Up @@ -5313,18 +5312,26 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
DAG.setRoot(OutChain);
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
unsigned Intrinsic) {
// Ignore the callsite's attributes. A specific call site may be marked with
// readnone, but the lowering code will expect the chain based on the
// definition.
/// Determine chain behavior for a target intrinsic call.
/// Returns {HasChain, OnlyLoad}: HasChain is true when the call must be
/// threaded onto the DAG chain (the intrinsic may access memory), and
/// OnlyLoad is true when it additionally only *reads* memory, returns
/// normally, and does not throw.
/// Ignore the callsite's attributes. A specific call site may be marked with
/// readnone, but the lowering code will expect the chain based on the
/// definition.
std::pair<bool, bool>
SelectionDAGBuilder::getTargetIntrinsicCallProperties(const CallBase &I) {
  const Function *Callee = I.getCalledFunction();
  const bool NeedsChain = !Callee->doesNotAccessMemory();
  bool ReadsOnly = false;
  if (NeedsChain)
    ReadsOnly = Callee->onlyReadsMemory() && Callee->willReturn() &&
                Callee->doesNotThrow();
  return {NeedsChain, ReadsOnly};
}

SmallVector<SDValue, 8> SelectionDAGBuilder::getTargetIntrinsicOperands(
const CallBase &I, bool HasChain, bool OnlyLoad,
TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();

// Build the operand list.
SmallVector<SDValue, 8> Ops;
if (HasChain) { // If this intrinsic has side-effects, chainify it.
Expand All @@ -5336,17 +5343,10 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
}

// Info is set by getTgtMemIntrinsic
TargetLowering::IntrinsicInfo Info;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
DAG.getMachineFunction(),
Intrinsic);

// Add the intrinsic ID as an integer operand if it's not a target intrinsic.
if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
Info.opc == ISD::INTRINSIC_W_CHAIN)
Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
if (!TgtMemIntrinsicInfo || TgtMemIntrinsicInfo->opc == ISD::INTRINSIC_VOID ||
TgtMemIntrinsicInfo->opc == ISD::INTRINSIC_W_CHAIN)
Ops.push_back(DAG.getTargetConstant(I.getIntrinsicID(), getCurSDLoc(),
TLI.getPointerTy(DAG.getDataLayout())));

// Add all operands of the call to the operand list.
Expand All @@ -5369,13 +5369,85 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
}

if (std::optional<OperandBundleUse> Bundle =
I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
Value *Token = Bundle->Inputs[0].get();
SDValue ConvControlToken = getValue(Token);
assert(Ops.back().getValueType() != MVT::Glue &&
"Did not expect another glue node here.");
ConvControlToken =
DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
Ops.push_back(ConvControlToken);
}

return Ops;
}

/// Compute the list of SelectionDAG value types produced by intrinsic call
/// \p I, appending MVT::Other for the chain result when \p HasChain is set.
SDVTList SelectionDAGBuilder::getTargetIntrinsicVTList(const CallBase &I,
                                                       bool HasChain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Expand the IR return type into the legal EVTs it decomposes into.
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);

  // Chained intrinsics produce the chain as their trailing result.
  if (HasChain)
    ValueVTs.push_back(MVT::Other);

  // Build the VT list exactly once; the previous code materialized it twice
  // (a dead local plus the returned value).
  return DAG.getVTList(ValueVTs);
}

/// Get an INTRINSIC node for a target intrinsic which does not touch memory.
/// Picks the opcode from the chain requirement and the IR result type:
/// no chain -> INTRINSIC_WO_CHAIN; chained with a value -> INTRINSIC_W_CHAIN;
/// chained and void -> INTRINSIC_VOID.
SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
    const Type &IntrinsicVT, bool HasChain, ArrayRef<SDValue> Ops,
    const SDVTList &VTs) {
  unsigned Opcode;
  if (!HasChain)
    Opcode = ISD::INTRINSIC_WO_CHAIN;
  else if (IntrinsicVT.isVoidTy())
    Opcode = ISD::INTRINSIC_VOID;
  else
    Opcode = ISD::INTRINSIC_W_CHAIN;
  return DAG.getNode(Opcode, getCurSDLoc(), VTs, Ops);
}

/// Post-process the result of a lowered target intrinsic: wire up the chain
/// (into PendingLoads for read-only intrinsics, otherwise as the new DAG
/// root), then — for non-void calls — attach an assertalign node when a
/// return alignment is known, or otherwise narrow scalar results via the
/// call's range metadata.
SDValue SelectionDAGBuilder::handleTargetIntrinsicRet(const CallBase &I,
                                                      bool HasChain,
                                                      bool OnlyLoad,
                                                      SDValue Result) {
  if (HasChain) {
    // The chain is always the last value produced by the node.
    unsigned NumVals = Result.getNode()->getNumValues();
    SDValue OutChain = Result.getValue(NumVals - 1);
    if (OnlyLoad)
      PendingLoads.push_back(OutChain);
    else
      DAG.setRoot(OutChain);
  }

  // Void calls produce no usable value; nothing further to adjust.
  if (I.getType()->isVoidTy())
    return Result;

  MaybeAlign Alignment = I.getRetAlign();
  if (InsertAssertAlign && Alignment) {
    // Insert `assertalign` node if there's an alignment.
    Result = DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
  } else if (!isa<VectorType>(I.getType())) {
    Result = lowerRangeToAssertZExt(DAG, I, Result);
  }

  return Result;
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
unsigned Intrinsic) {
auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(I);

// Info is set by getTgtMemIntrinsic
TargetLowering::IntrinsicInfo Info;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
bool IsTgtMemIntrinsic =
TLI.getTgtMemIntrinsic(Info, I, DAG.getMachineFunction(), Intrinsic);

SmallVector<SDValue, 8> Ops = getTargetIntrinsicOperands(
I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &Info : nullptr);
SDVTList VTs = getTargetIntrinsicVTList(I, HasChain);

// Propagate fast-math-flags from IR to node(s).
SDNodeFlags Flags;
Expand All @@ -5386,19 +5458,9 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
// Create the node.
SDValue Result;

if (auto Bundle = I.getOperandBundle(LLVMContext::OB_convergencectrl)) {
auto *Token = Bundle->Inputs[0].get();
SDValue ConvControlToken = getValue(Token);
assert(Ops.back().getValueType() != MVT::Glue &&
"Did not expected another glue node here.");
ConvControlToken =
DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
Ops.push_back(ConvControlToken);
}

// In some cases, custom collection of operands from CallInst I may be needed.
TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
if (IsTgtIntrinsic) {
if (IsTgtMemIntrinsic) {
// This is target intrinsic that touches memory
//
// TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
Expand All @@ -5418,34 +5480,11 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
Info.ssid, Info.order, Info.failureOrder);
Result =
DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops, MemVT, MMO);
} else if (!HasChain) {
Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
} else if (!I.getType()->isVoidTy()) {
Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
} else {
Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
Result = getTargetNonMemIntrinsicNode(*I.getType(), HasChain, Ops, VTs);
}

if (HasChain) {
SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
if (OnlyLoad)
PendingLoads.push_back(Chain);
else
DAG.setRoot(Chain);
}

if (!I.getType()->isVoidTy()) {
if (!isa<VectorType>(I.getType()))
Result = lowerRangeToAssertZExt(DAG, I, Result);

MaybeAlign Alignment = I.getRetAlign();

// Insert `assertalign` node if there's an alignment.
if (InsertAssertAlign && Alignment) {
Result =
DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
}
}
Result = handleTargetIntrinsicRet(I, HasChain, OnlyLoad, Result);

setValue(&I, Result);
}
Expand Down
11 changes: 11 additions & 0 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -727,6 +727,17 @@ class SelectionDAGBuilder {
MCSymbol *&BeginLabel);
SDValue lowerEndEH(SDValue Chain, const InvokeInst *II,
const BasicBlock *EHPadBB, MCSymbol *BeginLabel);

std::pair<bool, bool> getTargetIntrinsicCallProperties(const CallBase &I);
SmallVector<SDValue, 8> getTargetIntrinsicOperands(
const CallBase &I, bool HasChain, bool OnlyLoad,
TargetLowering::IntrinsicInfo *TgtMemIntrinsicInfo = nullptr);
SDVTList getTargetIntrinsicVTList(const CallBase &I, bool HasChain);
SDValue getTargetNonMemIntrinsicNode(const Type &IntrinsicVT, bool HasChain,
ArrayRef<SDValue> Ops,
const SDVTList &VTs);
SDValue handleTargetIntrinsicRet(const CallBase &I, bool HasChain,
bool OnlyLoad, SDValue Result);
};

/// This struct represents the registers (physical or virtual)
Expand Down
Loading