Skip to content

Conversation

@arsenm
Copy link
Contributor

@arsenm arsenm commented Jul 14, 2025

No description provided.

Copy link
Contributor Author

arsenm commented Jul 14, 2025

This stack of pull requests is managed by Graphite. Learn more about stacking.

@arsenm arsenm requested a review from SixWeining July 14, 2025 10:25
@arsenm arsenm marked this pull request as ready for review July 14, 2025 10:25
@llvmbot
Copy link
Member

llvmbot commented Jul 14, 2025

@llvm/pr-subscribers-backend-loongarch

Author: Matt Arsenault (arsenm)

Changes

Full diff: https://github.com/llvm/llvm-project/pull/148606.diff

1 file affected:

  • (added) llvm/test/CodeGen/LoongArch/llvm.exp10.ll (+281)
diff --git a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
new file mode 100644
index 0000000000000..677cca8f05337
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
@@ -0,0 +1,281 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=loongarch32 -mattr=+d < %s | FileCheck -check-prefix=LA32 %s
+; RUN: llc -mtriple=loongarch64 -mattr=+d < %s | FileCheck -check-prefix=LA64 %s
+
+define half @exp10_f16(half %x) #0 {
+; LA32-LABEL: exp10_f16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    bl exp10f
+; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a0, $fa0
+; LA32-NEXT:    lu12i.w $a1, -16
+; LA32-NEXT:    or $a0, $a0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: exp10_f16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    lu12i.w $a1, -16
+; LA64-NEXT:    or $a0, $a0, $a1
+; LA64-NEXT:    movgr2fr.w $fa0, $a0
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %r = call half @llvm.exp10.f16(half %x)
+  ret half %r
+}
+
+define <2 x half> @exp10_v2f16(<2 x half> %x) #0 {
+; LA32-LABEL: exp10_v2f16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA32-NEXT:    movgr2fr.w $fs0, $a1
+; LA32-NEXT:    movgr2fr.w $fa0, $a0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    bl exp10f
+; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $fp, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl __extendhfsf2
+; LA32-NEXT:    bl exp10f
+; LA32-NEXT:    bl __truncsfhf2
+; LA32-NEXT:    movfr2gr.s $a1, $fa0
+; LA32-NEXT:    move $a0, $fp
+; LA32-NEXT:    fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: exp10_v2f16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    move $fp, $a0
+; LA64-NEXT:    movgr2fr.w $fa0, $a1
+; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $sp, 2
+; LA64-NEXT:    movgr2fr.w $fa0, $fp
+; LA64-NEXT:    pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.s $a0, $fa0
+; LA64-NEXT:    st.h $a0, $sp, 0
+; LA64-NEXT:    vld $vr0, $sp, 0
+; LA64-NEXT:    vpickve2gr.h $a0, $vr0, 0
+; LA64-NEXT:    vpickve2gr.h $a1, $vr0, 1
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
+  ret <2 x half> %r
+}
+
+define float @exp10_f32(float %x) #0 {
+; LA32-LABEL: exp10_f32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    b exp10f
+;
+; LA64-LABEL: exp10_f32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    pcaddu18i $t8, %call36(exp10f)
+; LA64-NEXT:    jr $t8
+  %r = call float @llvm.exp10.f32(float %x)
+  ret float %r
+}
+
+define <2 x float> @exp10_v2f32(<2 x float> %x) #0 {
+; LA32-LABEL: exp10_v2f32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fmov.s $fs0, $fa1
+; LA32-NEXT:    bl exp10f
+; LA32-NEXT:    fmov.s $fs1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs0
+; LA32-NEXT:    bl exp10f
+; LA32-NEXT:    fmov.s $fa1, $fa0
+; LA32-NEXT:    fmov.s $fa0, $fs1
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: exp10_v2f32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -48
+; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 0
+; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
+; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
+; LA64-NEXT:    vreplvei.w $vr0, $vr0, 1
+; LA64-NEXT:    # kill: def $f0 killed $f0 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT:    vld $vr1, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT:    vpackev.w $vr0, $vr0, $vr1
+; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 48
+; LA64-NEXT:    ret
+  %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
+  ret <2 x float> %r
+}
+
+define double @exp10_f64(double %x) #0 {
+; LA32-LABEL: exp10_f64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    b exp10
+;
+; LA64-LABEL: exp10_f64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    pcaddu18i $t8, %call36(exp10)
+; LA64-NEXT:    jr $t8
+  %r = call double @llvm.exp10.f64(double %x)
+  ret double %r
+}
+
+define <2 x double> @exp10_v2f64(<2 x double> %x) #0 {
+; LA32-LABEL: exp10_v2f64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fmov.d $fs0, $fa1
+; LA32-NEXT:    bl exp10
+; LA32-NEXT:    fmov.d $fs1, $fa0
+; LA32-NEXT:    fmov.d $fa0, $fs0
+; LA32-NEXT:    bl exp10
+; LA32-NEXT:    fmov.d $fa1, $fa0
+; LA32-NEXT:    fmov.d $fa0, $fs1
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: exp10_v2f64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -48
+; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
+; LA64-NEXT:    vreplvei.d $vr0, $vr0, 0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
+; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
+; LA64-NEXT:    vreplvei.d $vr0, $vr0, 1
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 48
+; LA64-NEXT:    ret
+  %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
+  ret <2 x double> %r
+}
+
+define double @exp10_f128(double %x) #0 {
+; LA32-LABEL: exp10_f128:
+; LA32:       # %bb.0:
+; LA32-NEXT:    b exp10
+;
+; LA64-LABEL: exp10_f128:
+; LA64:       # %bb.0:
+; LA64-NEXT:    pcaddu18i $t8, %call36(exp10)
+; LA64-NEXT:    jr $t8
+  %r = call double @llvm.exp10.f128(double %x)
+  ret double %r
+}
+
+define <2 x double> @exp10_v2f128(<2 x double> %x) #0 {
+; LA32-LABEL: exp10_v2f128:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -32
+; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT:    fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT:    fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT:    fmov.d $fs0, $fa1
+; LA32-NEXT:    bl exp10
+; LA32-NEXT:    fmov.d $fs1, $fa0
+; LA32-NEXT:    fmov.d $fa0, $fs0
+; LA32-NEXT:    bl exp10
+; LA32-NEXT:    fmov.d $fa1, $fa0
+; LA32-NEXT:    fmov.d $fa0, $fs1
+; LA32-NEXT:    fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT:    fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 32
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: exp10_v2f128:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -48
+; LA64-NEXT:    st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT:    vst $vr0, $sp, 0 # 16-byte Folded Spill
+; LA64-NEXT:    vreplvei.d $vr0, $vr0, 0
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT:    vst $vr0, $sp, 16 # 16-byte Folded Spill
+; LA64-NEXT:    vld $vr0, $sp, 0 # 16-byte Folded Reload
+; LA64-NEXT:    vreplvei.d $vr0, $vr0, 1
+; LA64-NEXT:    # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT:    pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT:    jirl $ra, $ra, 0
+; LA64-NEXT:    movfr2gr.d $a0, $fa0
+; LA64-NEXT:    vld $vr0, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT:    vinsgr2vr.d $vr0, $a0, 1
+; LA64-NEXT:    ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 48
+; LA64-NEXT:    ret
+  %r = call <2 x double> @llvm.exp10.v2f128(<2 x double> %x)
+  ret <2 x double> %r
+}
+
+attributes #0 = { nounwind }

@arsenm arsenm force-pushed the users/arsenm/loongarch/add-test-exp10-intrinsic branch from f4ecebf to fd3e166 on July 14, 2025 at 12:52
Copy link
Contributor

@SixWeining SixWeining left a comment

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LGTM. Thanks.

@arsenm arsenm merged commit f04650b into main Jul 17, 2025
9 checks passed
@arsenm arsenm deleted the users/arsenm/loongarch/add-test-exp10-intrinsic branch July 17, 2025 10:08
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Projects

None yet

Development

Successfully merging this pull request may close these issues.

4 participants