Skip to content

Commit

Permalink
[BPF] Do atomic_fetch_*() pattern matching with memory ordering (llvm…
Browse files Browse the repository at this point in the history
…#107343)

Three commits in this pull request:
commit 1: implement pattern matching for memory orderings seq_cst,
acq_rel, release, acquire and monotonic. Specifically, for monotonic memory
ordering (relaxed memory model), if no return value is used, a locked insn
is used.
commit 2: add support to handle dwarf atomic modifier in BTF generation.
Actually atomic modifier is ignored in BTF.
commit 3: add tests for new atomic ordering support and BTF support with
_Atomic type.
I removed RFC tag as now patch sets are in reasonable states.

For atomic fetch_and_*() operations, do pattern matching with memory
ordering
seq_cst, acq_rel, release, acquire and monotonic (relaxed). For
fetch_and_*()
operations with seq_cst/acq_rel/release/acquire ordering,
atomic_fetch_*()
instructions are generated. For monotonic ordering, locked insns are
generated
if return value is not used. Otherwise, atomic_fetch_*() insns are used.
The main motivation is to resolve the kernel issue [1].
   
The following memory orderings are supported:
  seq_cst, acq_rel, release, acquire, relaxed
Current gcc style __sync_fetch_and_*() operations are all seq_cst.

To use explicit memory ordering, the _Atomic type is needed. The
following is
an example:

```
$ cat test.c
#include <stdatomic.h>
void f1(_Atomic int *i) {
   (void)__c11_atomic_fetch_and(i, 10, memory_order_relaxed);
}
void f2(_Atomic int *i) {
   (void)__c11_atomic_fetch_and(i, 10, memory_order_acquire);
}
void f3(_Atomic int *i) {
   (void)__c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
}
$ cat run.sh
clang  -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test.c -o test.o && llvm-objdump -d test.o
$ ./run.sh
       
test.o: file format elf64-bpf
       
Disassembly of section .text:

0000000000000000 <f1>:
       0:       b4 02 00 00 0a 00 00 00 w2 = 0xa
       1:       c3 21 00 00 50 00 00 00 lock *(u32 *)(r1 + 0x0) &= w2
       2:       95 00 00 00 00 00 00 00 exit
       
0000000000000018 <f2>:
       3:       b4 02 00 00 0a 00 00 00 w2 = 0xa
       4:       c3 21 00 00 51 00 00 00 w2 = atomic_fetch_and((u32 *)(r1 + 0x0), w2)
       5:       95 00 00 00 00 00 00 00 exit
       
0000000000000030 <f3>:
       6:       b4 02 00 00 0a 00 00 00 w2 = 0xa
       7:       c3 21 00 00 51 00 00 00 w2 = atomic_fetch_and((u32 *)(r1 + 0x0), w2)
       8:       95 00 00 00 00 00 00 00 exit
```    

The following is another example where return value is used:

```
$ cat test1.c
#include <stdatomic.h>
int f1(_Atomic int *i) {
   return __c11_atomic_fetch_and(i, 10, memory_order_relaxed);
}  
int f2(_Atomic int *i) {
   return __c11_atomic_fetch_and(i, 10, memory_order_acquire);
}  
int f3(_Atomic int *i) {
   return __c11_atomic_fetch_and(i, 10, memory_order_seq_cst);
}  
$ cat run.sh
clang  -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test1.c -o test1.o && llvm-objdump -d test1.o
$ ./run.sh

test1.o: file format elf64-bpf

Disassembly of section .text:

0000000000000000 <f1>:
       0:       b4 00 00 00 0a 00 00 00 w0 = 0xa
       1:       c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       2:       95 00 00 00 00 00 00 00 exit
       
0000000000000018 <f2>:
       3:       b4 00 00 00 0a 00 00 00 w0 = 0xa
       4:       c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       5:       95 00 00 00 00 00 00 00 exit
       
0000000000000030 <f3>:
       6:       b4 00 00 00 0a 00 00 00 w0 = 0xa
       7:       c3 01 00 00 51 00 00 00 w0 = atomic_fetch_and((u32 *)(r1 + 0x0), w0)
       8:       95 00 00 00 00 00 00 00 exit
```    

You can see that for relaxed memory ordering, if the return value is used,
the atomic_fetch_and()
insn is used. Otherwise, if the return value is not used, the locked insn is
used.

Here is another example with global _Atomic variable:

```
$ cat test3.c
#include <stdatomic.h>

_Atomic int i;

void f1(void) {
   (void)__c11_atomic_fetch_and(&i, 10, memory_order_relaxed);
}
void f2(void) {
   (void)__c11_atomic_fetch_and(&i, 10, memory_order_seq_cst);
}
$ cat run.sh
clang  -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -c test3.c -o test3.o && llvm-objdump -d test3.o
$ ./run.sh

test3.o:        file format elf64-bpf

Disassembly of section .text:

0000000000000000 <f1>:
       0:       b4 01 00 00 0a 00 00 00 w1 = 0xa
       1:       18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0x0 ll
       3:       c3 12 00 00 50 00 00 00 lock *(u32 *)(r2 + 0x0) &= w1
       4:       95 00 00 00 00 00 00 00 exit
       
0000000000000028 <f2>:
       5:       b4 01 00 00 0a 00 00 00 w1 = 0xa
       6:       18 02 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r2 = 0x0 ll
       8:       c3 12 00 00 51 00 00 00 w1 = atomic_fetch_and((u32 *)(r2 + 0x0), w1)
       9:       95 00 00 00 00 00 00 00 exit
```    

Note that in the above compilations, '-g' is not used. The reason is due
to the following IR
related to _Atomic type:
```
$clang  -I/home/yhs/work/bpf-next/tools/testing/selftests/bpf -O2 --target=bpf -g -S -emit-llvm test3.c
```
The related debug info for test3.c:
```
!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
!1 = distinct !DIGlobalVariable(name: "i", scope: !2, file: !3, line: 3, type: !16, isLocal: false, isDefinition: true)
...
!16 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !17)
!17 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
```

If compiling test.c, the related debug info:
```
...
!19 = distinct !DISubprogram(name: "f1", scope: !1, file: !1, line: 3, type: !20, scopeLine: 3, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !25)
!20 = !DISubroutineType(types: !21)
!21 = !{null, !22}
!22 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !23, size: 64)
!23 = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: !24)
!24 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!25 = !{!26}
!26 = !DILocalVariable(name: "i", arg: 1, scope: !19, file: !1, line: 3, type: !22)
```

All the above suggests _Atomic behaves like a modifier (e.g. const,
restrict, volatile).
This seems true based on doc [2].

Without properly handling DW_TAG_atomic_type, llvm BTF generation will be
incorrect since
the current implementation assumes no existence of DW_TAG_atomic_type.
So we have
two choices here:
(1). llvm bpf backend processes DW_TAG_atomic_type but ignores it in BTF
encoding.
(2). Add another type, e.g., BTF_KIND_ATOMIC to BTF. BTF_KIND_ATOMIC
behaves as a
       modifier like const/volatile/restrict.

For choice (1), llvm bpf backend should skip dwarf::DW_TAG_atomic_type
during
BTF generation whenever necessary.

For choice (2), BTF_KIND_ATOMIC will be added to BTF so the llvm backend and
kernel
need to handle that properly. The main advantage of it probably is to
maintain
this atomic type so it is also available to the skeleton. But I think for
the skeleton
a raw type might be good enough unless user space intends to do some
atomic
operation with that, which is an unlikely case.
    
So I choose choice (1) in this RFC implementation. See the commit
message of the second commit for details.

[1]
https://lore.kernel.org/bpf/[email protected]/
 [2] https://dwarfstd.org/issues/131112.1.html

---------
  • Loading branch information
yonghong-song authored and xgupta committed Oct 4, 2024
1 parent 9b12dc6 commit 9687aa2
Show file tree
Hide file tree
Showing 12 changed files with 1,827 additions and 29 deletions.
1 change: 1 addition & 0 deletions clang/lib/Basic/Targets/BPF.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ void BPFTargetInfo::getTargetDefines(const LangOptions &Opts,

Builder.defineMacro("__BPF_FEATURE_ADDR_SPACE_CAST");
Builder.defineMacro("__BPF_FEATURE_MAY_GOTO");
Builder.defineMacro("__BPF_FEATURE_ATOMIC_MEM_ORDERING");

if (CPU.empty())
CPU = "v3";
Expand Down
6 changes: 5 additions & 1 deletion clang/lib/CodeGen/CGDebugInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1249,8 +1249,12 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
CGM.getTarget().getDWARFAddressSpace(
CGM.getTypes().getTargetAddressSpace(PointeeTy));

const BTFTagAttributedType *BTFAttrTy;
if (auto *Atomic = PointeeTy->getAs<AtomicType>())
BTFAttrTy = dyn_cast<BTFTagAttributedType>(Atomic->getValueType());
else
BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
SmallVector<llvm::Metadata *, 4> Annots;
auto *BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
while (BTFAttrTy) {
StringRef Tag = BTFAttrTy->getAttr()->getBTFTypeTag();
if (!Tag.empty()) {
Expand Down
16 changes: 16 additions & 0 deletions clang/test/CodeGen/bpf-attr-type-tag-atomic.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
// REQUIRES: bpf-registered-target
// RUN: %clang_cc1 -triple bpf -emit-llvm -disable-llvm-passes -debug-info-kind=limited %s -o - | FileCheck %s

#define __tag1 __attribute__((btf_type_tag("tag1")))
int _Atomic __tag1 *g1;
volatile int _Atomic __tag1 *g2;

// CHECK: distinct !DIGlobalVariable(name: "g1", scope: ![[#]], file: ![[#]], line: [[#]], type: ![[PTR1:[0-9]+]]
// CHECK: distinct !DIGlobalVariable(name: "g2", scope: ![[#]], file: ![[#]], line: [[#]], type: ![[PTR2:[0-9]+]]
// CHECK: ![[PTR2]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[BASE2:[0-9]+]], size: [[#]], annotations: ![[ANNOT:[0-9]+]])
// CHECK: ![[BASE2]] = !DIDerivedType(tag: DW_TAG_volatile_type, baseType: ![[BASE1:[0-9]+]])
// CHECK: ![[BASE1]] = !DIDerivedType(tag: DW_TAG_atomic_type, baseType: ![[BASIC:[0-9]+]])
// CHECK: ![[BASIC]] = !DIBasicType(name: "int", size: [[#]], encoding: DW_ATE_signed)
// CHECK: ![[ANNOT]] = !{![[ENTRY:[0-9]+]]}
// CHECK: ![[ENTRY]] = !{!"btf_type_tag", !"tag1"}
// CHECK: ![[PTR1]] = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: ![[BASE1]], size: [[#]], annotations: ![[ANNOT]])
134 changes: 114 additions & 20 deletions llvm/lib/Target/BPF/BPFInstrInfo.td
Original file line number Diff line number Diff line change
Expand Up @@ -826,13 +826,12 @@ let Predicates = [BPFNoALU32] in {
}

// Atomic Fetch-and-<add, and, or, xor> operations
class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
string OpcStr, PatFrag OpNode>
class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr, string OpcStr>
: TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
(outs GPR:$dst),
(ins MEMri:$addr, GPR:$val),
"$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
[(set GPR:$dst, (OpNode ADDRri:$addr, GPR:$val))]> {
[]> {
bits<4> dst;
bits<20> addr;

Expand All @@ -844,13 +843,12 @@ class XFALU64<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
let BPFClass = BPF_STX;
}

class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,
string OpcStr, PatFrag OpNode>
class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr, string OpcStr>
: TYPE_LD_ST<BPF_ATOMIC.Value, SizeOp.Value,
(outs GPR32:$dst),
(ins MEMri:$addr, GPR32:$val),
"$dst = atomic_fetch_"#OpcStr#"(("#OpcodeStr#" *)($addr), $val)",
[(set GPR32:$dst, (OpNode ADDRri:$addr, GPR32:$val))]> {
[]> {
bits<4> dst;
bits<20> addr;

Expand All @@ -864,26 +862,122 @@ class XFALU32<BPFWidthModifer SizeOp, BPFArithOp Opc, string OpcodeStr,

let Constraints = "$dst = $val" in {
let Predicates = [BPFHasALU32], DecoderNamespace = "BPFALU32" in {
def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add", atomic_load_add_i32>;
def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and", atomic_load_and_i32>;
def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or", atomic_load_or_i32>;
def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor", atomic_load_xor_i32>;
def XFADDW32 : XFALU32<BPF_W, BPF_ADD, "u32", "add">;
def XFANDW32 : XFALU32<BPF_W, BPF_AND, "u32", "and">;
def XFORW32 : XFALU32<BPF_W, BPF_OR, "u32", "or">;
def XFXORW32 : XFALU32<BPF_W, BPF_XOR, "u32", "xor">;
}

let Predicates = [BPFHasALU32] in {
def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add", atomic_load_add_i64>;
def XFADDD : XFALU64<BPF_DW, BPF_ADD, "u64", "add">;
}
def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and", atomic_load_and_i64>;
def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or", atomic_load_or_i64>;
def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor", atomic_load_xor_i64>;
def XFANDD : XFALU64<BPF_DW, BPF_AND, "u64", "and">;
def XFORD : XFALU64<BPF_DW, BPF_OR, "u64", "or">;
def XFXORD : XFALU64<BPF_DW, BPF_XOR, "u64", "xor">;
}

// atomic_load_sub can be represented as a neg followed
// by an atomic_load_add.
def : Pat<(atomic_load_sub_i32 ADDRri:$addr, GPR32:$val),
(XFADDW32 ADDRri:$addr, (NEG_32 GPR32:$val))>;
def : Pat<(atomic_load_sub_i64 ADDRri:$addr, GPR:$val),
(XFADDD ADDRri:$addr, (NEG_64 GPR:$val))>;
let Predicates = [BPFHasALU32] in {
foreach P = [// add
[atomic_load_add_i32_monotonic, XADDW32],
[atomic_load_add_i32_acquire, XFADDW32],
[atomic_load_add_i32_release, XFADDW32],
[atomic_load_add_i32_acq_rel, XFADDW32],
[atomic_load_add_i32_seq_cst, XFADDW32],
// and
[atomic_load_and_i32_monotonic, XANDW32],
[atomic_load_and_i32_acquire, XFANDW32],
[atomic_load_and_i32_release, XFANDW32],
[atomic_load_and_i32_acq_rel, XFANDW32],
[atomic_load_and_i32_seq_cst, XFANDW32],
// or
[atomic_load_or_i32_monotonic, XORW32],
[atomic_load_or_i32_acquire, XFORW32],
[atomic_load_or_i32_release, XFORW32],
[atomic_load_or_i32_acq_rel, XFORW32],
[atomic_load_or_i32_seq_cst, XFORW32],
// xor
[atomic_load_xor_i32_monotonic, XXORW32],
[atomic_load_xor_i32_acquire, XFXORW32],
[atomic_load_xor_i32_release, XFXORW32],
[atomic_load_xor_i32_acq_rel, XFXORW32],
[atomic_load_xor_i32_seq_cst, XFXORW32],
] in {
def : Pat<(P[0] ADDRri:$addr, GPR32:$val), (P[1] ADDRri:$addr, GPR32:$val)>;
}

// atomic_load_sub can be represented as a neg followed
// by an atomic_load_add.
foreach P = [[atomic_load_sub_i32_monotonic, XADDW32],
[atomic_load_sub_i32_acquire, XFADDW32],
[atomic_load_sub_i32_release, XFADDW32],
[atomic_load_sub_i32_acq_rel, XFADDW32],
[atomic_load_sub_i32_seq_cst, XFADDW32],
] in {
def : Pat<(P[0] ADDRri:$addr, GPR32:$val), (P[1] ADDRri:$addr, (NEG_32 GPR32:$val))>;
}

foreach P = [// add
[atomic_load_add_i64_monotonic, XADDD],
[atomic_load_add_i64_acquire, XFADDD],
[atomic_load_add_i64_release, XFADDD],
[atomic_load_add_i64_acq_rel, XFADDD],
[atomic_load_add_i64_seq_cst, XFADDD],
] in {
def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, GPR:$val)>;
}
}

foreach P = [[atomic_load_sub_i64_monotonic, XADDD],
[atomic_load_sub_i64_acquire, XFADDD],
[atomic_load_sub_i64_release, XFADDD],
[atomic_load_sub_i64_acq_rel, XFADDD],
[atomic_load_sub_i64_seq_cst, XFADDD],
] in {
def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, (NEG_64 GPR:$val))>;
}

// Borrow the idea from X86InstrFragments.td
class binop_no_use<SDPatternOperator operator>
: PatFrag<(ops node:$A, node:$B),
(operator node:$A, node:$B),
[{ return SDValue(N, 0).use_empty(); }]>;

class binop_has_use<SDPatternOperator operator>
: PatFrag<(ops node:$A, node:$B),
(operator node:$A, node:$B),
[{ return !SDValue(N, 0).use_empty(); }]>;

foreach op = [add, and, or, xor] in {
def atomic_load_ # op # _i64_monotonic_nu:
binop_no_use <!cast<SDPatternOperator>("atomic_load_"#op# _i64_monotonic)>;
def atomic_load_ # op # _i64_monotonic_hu:
binop_has_use<!cast<SDPatternOperator>("atomic_load_"#op# _i64_monotonic)>;
}

foreach P = [// and
[atomic_load_and_i64_monotonic_nu, XANDD],
[atomic_load_and_i64_monotonic_hu, XFANDD],
[atomic_load_and_i64_acquire, XFANDD],
[atomic_load_and_i64_release, XFANDD],
[atomic_load_and_i64_acq_rel, XFANDD],
[atomic_load_and_i64_seq_cst, XFANDD],
// or
[atomic_load_or_i64_monotonic_nu, XORD],
[atomic_load_or_i64_monotonic_hu, XFORD],
[atomic_load_or_i64_acquire, XFORD],
[atomic_load_or_i64_release, XFORD],
[atomic_load_or_i64_acq_rel, XFORD],
[atomic_load_or_i64_seq_cst, XFORD],
// xor
[atomic_load_xor_i64_monotonic_nu, XXORD],
[atomic_load_xor_i64_monotonic_hu, XFXORD],
[atomic_load_xor_i64_acquire, XFXORD],
[atomic_load_xor_i64_release, XFXORD],
[atomic_load_xor_i64_acq_rel, XFXORD],
[atomic_load_xor_i64_seq_cst, XFXORD],
] in {
def : Pat<(P[0] ADDRri:$addr, GPR:$val), (P[1] ADDRri:$addr, GPR:$val)>;
}

// Atomic Exchange
class XCHG<BPFWidthModifer SizeOp, string OpcodeStr, PatFrag OpNode>
Expand Down
6 changes: 5 additions & 1 deletion llvm/lib/Target/BPF/BPFMIChecking.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {

RegIsGPR64 = GPR64RegClass->contains(MO.getReg());
if (!MO.isDead()) {
// It is a GPR64 live Def, we are sure it is live. */
// It is a GPR64 live Def, we are sure it is live.
if (RegIsGPR64)
return true;
// It is a GPR32 live Def, we are unsure whether it is really dead due to
Expand Down Expand Up @@ -153,6 +153,10 @@ static bool hasLiveDefs(const MachineInstr &MI, const TargetRegisterInfo *TRI) {
}

void BPFMIPreEmitChecking::processAtomicInsts() {
if (MF->getSubtarget<BPFSubtarget>().getHasJmp32())
return;

// Only check for cpu version 1 and 2.
for (MachineBasicBlock &MBB : *MF) {
for (MachineInstr &MI : MBB) {
if (MI.getOpcode() != BPF::XADDW && MI.getOpcode() != BPF::XADDD)
Expand Down
29 changes: 22 additions & 7 deletions llvm/lib/Target/BPF/BTFDebug.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,15 @@ static const char *BTFKindStr[] = {
#include "llvm/DebugInfo/BTF/BTF.def"
};

static const DIType *tryRemoveAtomicType(const DIType *Ty) {
if (!Ty)
return Ty;
auto DerivedTy = dyn_cast<DIDerivedType>(Ty);
if (DerivedTy && DerivedTy->getTag() == dwarf::DW_TAG_atomic_type)
return DerivedTy->getBaseType();
return Ty;
}

/// Emit a BTF common type.
void BTFTypeBase::emitType(MCStreamer &OS) {
OS.AddComment(std::string(BTFKindStr[Kind]) + "(id = " + std::to_string(Id) +
Expand Down Expand Up @@ -90,7 +99,7 @@ void BTFTypeDerived::completeType(BTFDebug &BDebug) {
return;

// The base type for PTR/CONST/VOLATILE could be void.
const DIType *ResolvedType = DTy->getBaseType();
const DIType *ResolvedType = tryRemoveAtomicType(DTy->getBaseType());
if (!ResolvedType) {
assert((Kind == BTF::BTF_KIND_PTR || Kind == BTF::BTF_KIND_CONST ||
Kind == BTF::BTF_KIND_VOLATILE) &&
Expand Down Expand Up @@ -305,7 +314,7 @@ void BTFTypeStruct::completeType(BTFDebug &BDebug) {
} else {
BTFMember.Offset = DDTy->getOffsetInBits();
}
const auto *BaseTy = DDTy->getBaseType();
const auto *BaseTy = tryRemoveAtomicType(DDTy->getBaseType());
BTFMember.Type = BDebug.getTypeId(BaseTy);
Members.push_back(BTFMember);
}
Expand Down Expand Up @@ -342,15 +351,15 @@ void BTFTypeFuncProto::completeType(BTFDebug &BDebug) {
IsCompleted = true;

DITypeRefArray Elements = STy->getTypeArray();
auto RetType = Elements[0];
auto RetType = tryRemoveAtomicType(Elements[0]);
BTFType.Type = RetType ? BDebug.getTypeId(RetType) : 0;
BTFType.NameOff = 0;

// For null parameter which is typically the last one
// to represent the vararg, encode the NameOff/Type to be 0.
for (unsigned I = 1, N = Elements.size(); I < N; ++I) {
struct BTF::BTFParam Param;
auto Element = Elements[I];
auto Element = tryRemoveAtomicType(Elements[I]);
if (Element) {
Param.NameOff = BDebug.addString(FuncArgNames[I]);
Param.Type = BDebug.getTypeId(Element);
Expand Down Expand Up @@ -483,7 +492,7 @@ void BTFTypeTypeTag::completeType(BTFDebug &BDebug) {
IsCompleted = true;
BTFType.NameOff = BDebug.addString(Tag);
if (DTy) {
const DIType *ResolvedType = DTy->getBaseType();
const DIType *ResolvedType = tryRemoveAtomicType(DTy->getBaseType());
if (!ResolvedType)
BTFType.Type = 0;
else
Expand Down Expand Up @@ -800,6 +809,10 @@ void BTFDebug::visitDerivedType(const DIDerivedType *DTy, uint32_t &TypeId,
bool CheckPointer, bool SeenPointer) {
unsigned Tag = DTy->getTag();

if (Tag == dwarf::DW_TAG_atomic_type)
return visitTypeEntry(DTy->getBaseType(), TypeId, CheckPointer,
SeenPointer);

/// Try to avoid chasing pointees, esp. structure pointees which may
/// unnecessary bring in a lot of types.
if (CheckPointer && !SeenPointer) {
Expand Down Expand Up @@ -1444,8 +1457,10 @@ void BTFDebug::processGlobals(bool ProcessingMapDef) {
DIGlobal = GVE->getVariable();
if (SecName.starts_with(".maps"))
visitMapDefType(DIGlobal->getType(), GVTypeId);
else
visitTypeEntry(DIGlobal->getType(), GVTypeId, false, false);
else {
const DIType *Ty = tryRemoveAtomicType(DIGlobal->getType());
visitTypeEntry(Ty, GVTypeId, false, false);
}
break;
}

Expand Down
Loading

0 comments on commit 9687aa2

Please sign in to comment.