Skip to content
18 changes: 4 additions & 14 deletions llvm/include/llvm/MC/MCAsmBackend.h
Original file line number Diff line number Diff line change
Expand Up @@ -106,20 +106,6 @@ class LLVM_ABI MCAsmBackend {
/// Get information on a fixup kind.
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const;

/// Hook to check if extra nop bytes must be inserted for alignment directive.
/// For some targets this may be necessary in order to support linker
/// relaxation. The number of bytes to insert is returned in \p Size.
/// The default implementation returns false, meaning no extra bytes are
/// required; \p Size is only meaningful when an override returns true.
virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                                   unsigned &Size) {
  return false;
}

/// Hook which indicates if the target requires a fixup to be generated when
/// handling an align directive in an executable section.
/// The default implementation returns false, meaning no fixup is required.
virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) {
  return false;
}

// Evaluate a fixup, returning std::nullopt to use default handling for
// `Value` and `IsResolved`. Otherwise, returns `IsResolved` with the
// expectation that the hook updates `Value`.
Expand Down Expand Up @@ -177,6 +163,10 @@ class LLVM_ABI MCAsmBackend {
}

// Defined by linker relaxation targets.

// Return false to use default handling. Otherwise, set `Size` to the number
// of padding bytes. Targets that return true take over handling of the align
// fragment (e.g. RISC-V style linker relaxation may attach an alignment
// fixup). The default performs no relaxation and leaves `Size` untouched.
virtual bool relaxAlign(MCFragment &F, unsigned &Size) { return false; }
// Return false to use default handling. The default performs no relaxation
// and leaves `WasRelaxed` untouched.
virtual bool relaxDwarfLineAddr(MCFragment &, bool &WasRelaxed) const {
  return false;
}
Expand Down
126 changes: 62 additions & 64 deletions llvm/lib/MC/MCAssembler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
switch (F.getKind()) {
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
Expand Down Expand Up @@ -226,27 +227,6 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
case MCFragment::FT_SymbolId:
return 4;

case MCFragment::FT_Align: {
unsigned Offset = F.Offset + F.getFixedSize();
unsigned Size = offsetToAlignment(Offset, F.getAlignment());

// Insert extra Nops for code alignment if the target define
// shouldInsertExtraNopBytesForCodeAlign target hook.
if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
return F.getFixedSize() + Size;

// If we are padding with nops, force the padding to be larger than the
// minimum nop size.
if (Size > 0 && F.hasAlignEmitNops()) {
while (Size % getBackend().getMinimumNopSize())
Size += F.getAlignment().value();
}
if (Size > F.getAlignMaxBytesToEmit())
Size = 0;
return F.getFixedSize() + Size;
}

case MCFragment::FT_Org: {
const MCOrgFragment &OF = cast<MCOrgFragment>(F);
MCValue Value;
Expand Down Expand Up @@ -418,7 +398,6 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
switch (F.getKind()) {
case MCFragment::FT_Data:
case MCFragment::FT_Relaxable:
case MCFragment::FT_Align:
case MCFragment::FT_LEB:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
Expand All @@ -431,42 +410,41 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
const auto &EF = cast<MCFragment>(F);
OS << StringRef(EF.getContents().data(), EF.getContents().size());
OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
if (F.getKind() == MCFragment::FT_Align) {
++stats::EmittedAlignFragments;
assert(F.getAlignFillLen() &&
"Invalid virtual align in concrete fragment!");

uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
"computeFragmentSize computed size is incorrect");

// See if we are aligning with nops, and if so do that first to try to
// fill the Count bytes. Then if that did not fill any bytes or there are
// any bytes left to fill use the Value and ValueSize to fill the rest. If
// we are aligning with nops, ask that target to emit the right data.
if (F.hasAlignEmitNops()) {
if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
report_fatal_error("unable to write nop sequence of " + Twine(Count) +
" bytes");
} else {
// Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (F.getAlignFillLen()) {
default:
llvm_unreachable("Invalid size!");
case 1:
OS << char(F.getAlignFill());
break;
case 2:
support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
break;
case 4:
support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
break;
case 8:
support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
break;
}
} break;

case MCFragment::FT_Align: {
++stats::EmittedAlignFragments;
OS << StringRef(F.getContents().data(), F.getContents().size());
assert(F.getAlignFillLen() &&
"Invalid virtual align in concrete fragment!");

uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
"computeFragmentSize computed size is incorrect");

// In the nops mode, call the backend hook to write `Count` nops.
if (F.hasAlignEmitNops()) {
if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
reportFatalInternalError("unable to write nop sequence of " +
Twine(Count) + " bytes");
} else {
// Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (F.getAlignFillLen()) {
default:
llvm_unreachable("Invalid size!");
case 1:
OS << char(F.getAlignFill());
break;
case 2:
support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
break;
case 4:
support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
break;
case 8:
support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
break;
}
}
}
Expand Down Expand Up @@ -720,11 +698,6 @@ void MCAssembler::layout() {
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
} else if (F.getKind() == MCFragment::FT_Align) {
// For RISC-V linker relaxation, an alignment relocation might be
// needed.
if (F.hasAlignEmitNops())
getBackend().shouldInsertFixupForCodeAlign(*this, F);
}
}
}
Expand Down Expand Up @@ -975,7 +948,32 @@ void MCAssembler::layoutSection(MCSection &Sec) {
  // Assign each fragment its section-relative offset, sizing align fragments
  // inline since their padding depends on the running offset.
  uint64_t Offset = 0;
  for (MCFragment &F : Sec) {
    F.Offset = Offset;
    if (F.getKind() == MCFragment::FT_Align) {
      // The fixed part is laid out first; padding is computed from the
      // offset that follows it.
      Offset += F.getFixedSize();
      unsigned Size = offsetToAlignment(Offset, F.getAlignment());
      // In the nops mode, RISC-V style linker relaxation might adjust the size
      // and add a fixup, even if `Size` is originally 0.
      bool AlignFixup = false;
      if (F.hasAlignEmitNops()) {
        AlignFixup = getBackend().relaxAlign(F, Size);
        // If the backend does not handle the fragment specially, pad with nops,
        // but ensure that the padding is larger than the minimum nop size.
        if (!AlignFixup)
          while (Size % getBackend().getMinimumNopSize())
            Size += F.getAlignment().value();
      }
      // MaxBytesToEmit caps the padding only when no align fixup was emitted;
      // a fixup-carrying fragment must keep its size for the relocation.
      if (!AlignFixup && Size > F.getAlignMaxBytesToEmit())
        Size = 0;
      // Update the variable tail size. The content is ignored.
      assert(F.VarContentStart == 0 &&
             "VarContentStart should not be modified");
      F.VarContentEnd = Size;
      if (F.VarContentEnd > F.getParent()->ContentStorage.size())
        F.getParent()->ContentStorage.resize(F.VarContentEnd);
      Offset += Size;
    } else {
      Offset += computeFragmentSize(F);
    }
  }
}

Expand Down
5 changes: 1 addition & 4 deletions llvm/lib/MC/MCExpr.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -370,7 +370,6 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
}

int64_t Num;
unsigned Count;
if (DF) {
Displacement += DF->getContents().size();
} else if (F->getKind() == MCFragment::FT_Relaxable &&
Expand All @@ -380,9 +379,7 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
// data fragment.
Displacement += F->getSize();
} else if (F->getKind() == MCFragment::FT_Align && Layout &&
F->hasAlignEmitNops() &&
!Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
*F, Count)) {
F->isLinkerRelaxable()) {
Displacement += Asm->computeFragmentSize(*F);
} else if (auto *FF = dyn_cast<MCFillFragment>(F);
FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
Expand Down
7 changes: 6 additions & 1 deletion llvm/lib/MC/MCFragment.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,8 +83,13 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
auto Fixed = getContents();
auto Var = getVarContents();
OS << " Size:" << Fixed.size();
if (getKind() != MCFragment::FT_Data)
if (getKind() != MCFragment::FT_Data) {
OS << '+' << Var.size();
// FT_Align uses getVarContents to track the size, but the content is
// ignored and not useful.
if (getKind() == MCFragment::FT_Align)
Var = {};
}
OS << " [";
for (unsigned i = 0, e = Fixed.size(); i != e; ++i) {
if (i) OS << ",";
Expand Down
8 changes: 0 additions & 8 deletions llvm/lib/MC/MCObjectStreamer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -535,14 +535,6 @@ void MCObjectStreamer::emitCodeAlignment(Align Alignment,
emitValueToAlignment(Alignment, 0, 1, MaxBytesToEmit);
F->u.align.EmitNops = true;
F->STI = STI;

// With RISC-V style linker relaxation, mark the section as linker-relaxable
// if the alignment is larger than the minimum NOP size.
unsigned Size;
if (getAssembler().getBackend().shouldInsertExtraNopBytesForCodeAlign(*F,
Size)) {
F->getParent()->setLinkerRelaxable();
}
}

void MCObjectStreamer::emitValueToOffset(const MCExpr *Offset,
Expand Down
115 changes: 47 additions & 68 deletions llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -177,74 +177,6 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
}
}

// Linker relaxation may change code size. We have to insert Nops
// for .align directive when linker relaxation enabled. So then Linker
// could satisfy alignment by removing Nops.
// The function returns the total Nops Size we need to insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCFragment &AF, unsigned &Size) {
  // Calculate Nops Size only when linker relaxation enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
  const unsigned MinNopLen = 4;
  if (AF.getAlignMaxBytesToEmit() < MinNopLen)
    return false;
  // Worst-case padding: the alignment minus one minimum-size nop.
  // NOTE(review): Size is written even on the `false` path below for
  // alignments <= MinNopLen; callers must only consult Size when this
  // function returns true.
  Size = AF.getAlignment().value() - MinNopLen;
  return AF.getAlignment() > MinNopLen;
}

// We need to insert R_LARCH_ALIGN relocation type to indicate the
// position of Nops and the total bytes of the Nops have been inserted
// when linker relaxation enabled.
// The function inserts fixup_loongarch_align fixup which eventually will
// transfer to R_LARCH_ALIGN relocation type.
// The improved R_LARCH_ALIGN requires symbol index. The lowest 8 bits of
// addend represent alignment and the other bits of addend represent the
// maximum number of bytes to emit. The maximum number of bytes is zero
// means ignore the emit limit.
bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                                        MCFragment &AF) {
  // Insert the fixup only when linker relaxation enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Calculate total Nops we need to insert. If there are none to insert
  // then simply return.
  unsigned InsertedNopBytes;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, InsertedNopBytes))
    return false;

  MCSection *Sec = AF.getParent();
  MCContext &Ctx = getContext();
  // The fixup expression is a dummy; the relocation's payload is carried in
  // the MCValue recorded below.
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  MCFixup Fixup = MCFixup::create(AF.getFixedSize(), Dummy, ELF::R_LARCH_ALIGN);
  unsigned MaxBytesToEmit = AF.getAlignMaxBytesToEmit();

  // Build the extended (symbol-based) value, lazily creating one per-section
  // marker symbol at offset 0 and caching it in SecToAlignSym.
  auto createExtendedValue = [&]() {
    const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
    if (MCSym == nullptr) {
      // Define a marker symbol at the section with an offset of 0.
      MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
      Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
      Asm.registerSymbol(*Sym);
      MCSym = MCSymbolRefExpr::create(Sym, Ctx);
      getSecToAlignSym()[Sec] = MCSym;
    }
    return MCValue::get(&MCSym->getSymbol(), nullptr,
                        MaxBytesToEmit << 8 | Log2(AF.getAlignment()));
  };

  // Simple form when the emit limit cannot bite; otherwise encode the limit
  // and alignment in the extended, symbol-referencing form.
  uint64_t FixedValue = 0;
  MCValue Value = MaxBytesToEmit >= InsertedNopBytes
                      ? MCValue::get(InsertedNopBytes)
                      : createExtendedValue();
  Asm.getWriter().recordRelocation(AF, Fixup, Value, FixedValue);

  return true;
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCFixup &Fixup,
const MCValue &Target) {
switch (Fixup.getKind()) {
Expand Down Expand Up @@ -279,6 +211,53 @@ getRelocPairForSize(unsigned Size) {
}
}

// Check if an R_LARCH_ALIGN relocation is needed for an alignment directive.
// If conditions are met, compute the padding size and create a fixup encoding
// the padding size in the addend. If MaxBytesToEmit is smaller than the padding
// size, the fixup encodes MaxBytesToEmit in the higher bits and references a
// per-section marker symbol.
bool LoongArchAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
  // Use default handling unless linker relaxation is enabled and the
  // MaxBytesToEmit >= the nop size.
  if (!F.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;
  const unsigned MinNopLen = 4;
  unsigned MaxBytesToEmit = F.getAlignMaxBytesToEmit();
  if (MaxBytesToEmit < MinNopLen)
    return false;

  // Worst-case padding: the alignment minus one minimum-size nop. Alignments
  // of one nop or less need no relaxation handling.
  Size = F.getAlignment().value() - MinNopLen;
  if (F.getAlignment() <= MinNopLen)
    return false;

  MCContext &Ctx = getContext();
  const MCExpr *Expr = nullptr;
  if (MaxBytesToEmit >= Size) {
    // Simple form: the addend is just the number of padding bytes.
    Expr = MCConstantExpr::create(Size, Ctx);
  } else {
    // Extended form: reference a per-section marker symbol; the addend packs
    // MaxBytesToEmit in the high bits and Log2(alignment) in the low 8 bits.
    MCSection *Sec = F.getParent();
    const MCSymbolRefExpr *SymRef = getSecToAlignSym()[Sec];
    if (SymRef == nullptr) {
      // Define a marker symbol at the section with an offset of 0.
      MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
      Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
      Asm->registerSymbol(*Sym);
      SymRef = MCSymbolRefExpr::create(Sym, Ctx);
      getSecToAlignSym()[Sec] = SymRef;
    }
    Expr = MCBinaryExpr::createAdd(
        SymRef,
        MCConstantExpr::create((MaxBytesToEmit << 8) | Log2(F.getAlignment()),
                               Ctx),
        Ctx);
  }
  // Attach the fixup as the fragment's variable-tail fixup and mark the
  // section linker-relaxable so the writer emits R_LARCH_ALIGN.
  MCFixup Fixup =
      MCFixup::create(0, Expr, FirstLiteralRelocationKind + ELF::R_LARCH_ALIGN);
  F.setVarFixups({Fixup});
  F.getParent()->setLinkerRelaxable();
  return true;
}

std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCFragment &F,
int64_t &Value) const {
const MCExpr &Expr = F.getLEBValue();
Expand Down
8 changes: 1 addition & 7 deletions llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,19 +45,13 @@ class LoongArchAsmBackend : public MCAsmBackend {
MutableArrayRef<char> Data, uint64_t Value,
bool IsResolved) override;

// Return Size with extra Nop Bytes for alignment directive in code section.
bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
unsigned &Size) override;

// Insert target specific fixup type for alignment directive in code section.
bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;

bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target);

std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;

MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;

bool relaxAlign(MCFragment &F, unsigned &Size) override;
bool relaxDwarfLineAddr(MCFragment &F, bool &WasRelaxed) const override;
bool relaxDwarfCFA(MCFragment &F, bool &WasRelaxed) const override;
std::pair<bool, bool> relaxLEB128(MCFragment &F,
Expand Down
Loading
Loading