Auto merge of #54012 - denismerigoux:cg-llvm-gen, r=<try>
rustc_codegen_llvm: traitification of LLVM-specific CodegenCx and Builder methods

This PR is the continuation of #52461 in the grand plan of #45274 to allow for multiple codegen backends. A first attempt at this was #52987, but since @irinagpopa is no longer working on it, I'm taking ownership of the PR.

The changes are pure refactoring and do not affect the logic of the code. Performance should not be affected, since all parametrization is done with generics rather than trait objects, so method calls remain statically dispatched.
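
As a toy illustration of that point (hypothetical `Toy*` names, not code from this PR), a function that is generic over a context trait is monomorphized for each concrete backend, so the calls stay direct:

```rust
// Toy sketch only: shows that trait-bounded generics keep static dispatch.
// All names here are made up for illustration.
#[derive(Debug, Clone, Copy, PartialEq)]
struct ToyType {
    bits: u64,
}

trait ToyTypeMethods {
    fn type_ix(&self, num_bits: u64) -> ToyType;
}

struct ToyCx;

impl ToyTypeMethods for ToyCx {
    fn type_ix(&self, num_bits: u64) -> ToyType {
        ToyType { bits: num_bits }
    }
}

// Generic over the context: monomorphized per backend, so `cx.type_ix(..)`
// compiles to a direct call and no `dyn Trait` vtable lookup is introduced.
fn int_type_for_bytes<Cx: ToyTypeMethods>(cx: &Cx, bytes: u64) -> ToyType {
    cx.type_ix(bytes * 8)
}

fn main() {
    assert_eq!(int_type_for_bytes(&ToyCx, 4), ToyType { bits: 32 });
}
```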

The `librustc_codegen_llvm` crate now contains a new `interfaces` folder whose traits describe part of how the compiler interfaces with LLVM during codegen. `CodegenCx` and `Builder` implement those traits.
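
For orientation, here is a rough sketch of the shape of those interfaces, inferred from the call sites visible in the diff below (`cx.type_ix(..)`, `cx.type_struct(..)`, `cx.const_usize(..)`, `bx.cx()`). The signatures are simplified (lifetimes omitted, associated types instead of the `&'ll Value` parameter threaded through `CodegenCx` in the actual code), so treat it as an approximation rather than the real definitions in `interfaces/`:

```rust
// Simplified sketch, not the actual trait definitions from this PR.
pub trait TypeMethods {
    type Type;

    fn type_void(&self) -> Self::Type;
    fn type_i8(&self) -> Self::Type;
    fn type_i8p(&self) -> Self::Type;
    fn type_ix(&self, num_bits: u64) -> Self::Type;
    fn type_f32(&self) -> Self::Type;
    fn type_f64(&self) -> Self::Type;
    fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
    fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
    fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
    fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
    fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
}

pub trait ConstMethods: TypeMethods {
    type Value;

    fn const_i32(&self, i: i32) -> Self::Value;
    fn const_usize(&self, i: u64) -> Self::Value;
}

// A builder can hand back its codegen context, as in `bx.cx()` in the diff.
pub trait BuilderMethods {
    type CodegenCx: TypeMethods + ConstMethods;

    fn cx(&self) -> &Self::CodegenCx;
}
```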

Many things are still missing: not all calls to LLVM are behind a trait yet, and the LLVM-agnostic code will later need to be parametrized over these traits.
bors committed Sep 7, 2018
2 parents a8c11d2 + 5a880b7 commit 12c6a75
Showing 42 changed files with 2,387 additions and 1,464 deletions.
107 changes: 64 additions & 43 deletions src/librustc_codegen_llvm/abi.rs
@@ -11,14 +11,16 @@
use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::{ty_fn_sig, C_usize};
use common::ty_fn_sig;
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;

use interfaces::{BuilderMethods, ConstMethods, TypeMethods};

use rustc_target::abi::{LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::ty::layout;
@@ -104,29 +106,29 @@ impl ArgAttributesExt for ArgAttributes {
}

pub trait LlvmType {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type;
}

impl LlvmType for Reg {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type {
match self.kind {
RegKind::Integer => Type::ix(cx, self.size.bits()),
RegKind::Integer => cx.type_ix(self.size.bits()),
RegKind::Float => {
match self.size.bits() {
32 => Type::f32(cx),
64 => Type::f64(cx),
32 => cx.type_f32(),
64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self)
}
}
RegKind::Vector => {
Type::vector(Type::i8(cx), self.size.bytes())
cx.type_vector(cx.type_i8(), self.size.bytes())
}
}
}
}

impl LlvmType for CastTarget {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
(0, 0)
@@ -143,7 +145,7 @@ impl LlvmType for CastTarget {

// Simplify to array when all chunks are the same size and type
if rem_bytes == 0 {
return Type::array(rest_ll_unit, rest_count);
return cx.type_array(rest_ll_unit, rest_count);
}
}

@@ -158,35 +160,49 @@ if rem_bytes != 0 {
if rem_bytes != 0 {
// Only integers can be really split further.
assert_eq!(self.rest.unit.kind, RegKind::Integer);
args.push(Type::ix(cx, rem_bytes * 8));
args.push(cx.type_ix(rem_bytes * 8));
}

Type::struct_(cx, &args, false)
cx.type_struct(&args, false)
}
}

pub trait ArgTypeExt<'ll, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>);
fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>);
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type;
fn store(
&self,
bx: &Builder<'_, 'll, 'tcx, &'ll Value>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
);
fn store_fn_arg(
&self,
bx: &Builder<'_, 'll, 'tcx, &'ll Value>,
idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value>
);
}

impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type {
self.layout.llvm_type(cx)
}

/// Store a direct/indirect value described by this ArgType into a
/// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) {
fn store(
&self,
bx: &Builder<'_, 'll, 'tcx, &'ll Value>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
) {
if self.is_ignore() {
return;
}
let cx = bx.cx;
let cx = bx.cx();
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
} else if self.is_unsized_indirect() {
@@ -196,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
bx.store(val, cast_dst, self.layout.align);
} else {
// The actual return type is a struct, but the ABI
@@ -224,9 +240,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {

// ...and then memcpy it to the intended destination.
base::call_memcpy(bx,
bx.pointercast(dst.llval, Type::i8p(cx)),
bx.pointercast(llscratch, Type::i8p(cx)),
C_usize(cx, self.layout.size.bytes()),
bx.pointercast(dst.llval, cx.type_i8p()),
bx.pointercast(llscratch, cx.type_i8p()),
cx.const_usize(self.layout.size.bytes()),
self.layout.align.min(scratch_align),
MemFlags::empty());

@@ -237,7 +253,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}
}

fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) {
fn store_fn_arg(
&self,
bx: &Builder<'a, 'll, 'tcx, &'ll Value>,
idx: &mut usize,
dst: PlaceRef<'tcx, &'ll Value>
) {
let mut next = || {
let val = llvm::get_param(bx.llfn(), *idx as c_uint);
*idx += 1;
@@ -259,47 +280,47 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}

pub trait FnTypeExt<'tcx> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>)
-> Self;
fn new(cx: &CodegenCx<'ll, 'tcx>,
fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
fn new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_internal(
cx: &CodegenCx<'ll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
) -> Self;
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'ll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
abi: Abi);
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type;
fn llvm_cconv(&self) -> llvm::CallConv;
fn apply_attrs_llfn(&self, llfn: &'ll Value);
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value);
}

impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>)
fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>)
-> Self {
let fn_ty = instance.ty(cx.tcx);
let sig = ty_fn_sig(cx, fn_ty);
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
FnType::new(cx, sig, &[])
}

fn new(cx: &CodegenCx<'ll, 'tcx>,
fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, _| {
ArgType::new(cx.layout_of(ty))
})
}

fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
fn new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
@@ -326,7 +347,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}

fn new_internal(
cx: &CodegenCx<'ll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
Expand Down Expand Up @@ -507,7 +528,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}

fn adjust_for_abi(&mut self,
cx: &CodegenCx<'ll, 'tcx>,
cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
abi: Abi) {
if abi == Abi::Unadjusted { return }

Expand Down Expand Up @@ -574,7 +595,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}

fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
@@ -584,14 +605,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
);

let llreturn_ty = match self.ret.mode {
PassMode::Ignore => Type::void(cx),
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => {
self.ret.layout.immediate_llvm_type(cx)
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
Type::void(cx)
llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};

Expand All @@ -617,15 +638,15 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(),
PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}

if self.variadic {
Type::variadic_func(&llargument_tys, llreturn_ty)
cx.type_variadic_func(&llargument_tys, llreturn_ty)
} else {
Type::func(&llargument_tys, llreturn_ty)
cx.type_func(&llargument_tys, llreturn_ty)
}
}

Expand Down Expand Up @@ -680,7 +701,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}

fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx, &'ll Value>, callsite: &'ll Value) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
@@ -699,7 +720,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
// by the LLVM verifier.
match scalar.value {
layout::Int(..) if !scalar.is_bool() => {
let range = scalar.valid_range_exclusive(bx.cx);
let range = scalar.valid_range_exclusive(bx.cx());
if range.start != range.end {
bx.range_metadata(callsite, range);
}
30 changes: 12 additions & 18 deletions src/librustc_codegen_llvm/asm.rs
@@ -9,26 +9,25 @@
// except according to those terms.

use llvm;
use common::*;
use type_::Type;
use context::CodegenCx;
use type_of::LayoutLlvmExt;
use builder::Builder;
use value::Value;

use rustc::hir;
use interfaces::{BuilderMethods, ConstMethods, TypeMethods};

use mir::place::PlaceRef;
use mir::operand::OperandValue;

use std::ffi::CString;
use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};

// Take an inline assembly expression and splat it out via LLVM
pub fn codegen_inline_asm(
bx: &Builder<'a, 'll, 'tcx>,
bx: &Builder<'a, 'll, 'tcx, &'ll Value>,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'ll, 'tcx>>,
outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
mut inputs: Vec<&'ll Value>
) {
let mut ext_constraints = vec![];
@@ -44,7 +43,7 @@ pub fn codegen_inline_asm(
if out.is_indirect {
indirect_outputs.push(place.load(bx).immediate());
} else {
output_types.push(place.layout.llvm_type(bx.cx));
output_types.push(place.layout.llvm_type(bx.cx()));
}
}
if !indirect_outputs.is_empty() {
@@ -76,14 +75,9 @@ pub fn codegen_inline_asm(
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => Type::void(bx.cx),
0 => bx.cx().type_void(),
1 => output_types[0],
_ => Type::struct_(bx.cx, &output_types, false)
};

let dialect = match ia.dialect {
AsmDialect::Att => llvm::AsmDialect::Att,
AsmDialect::Intel => llvm::AsmDialect::Intel,
_ => bx.cx().type_struct(&output_types, false)
};

let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -95,7 +89,7 @@ pub fn codegen_inline_asm(
output_type,
ia.volatile,
ia.alignstack,
dialect
ia.dialect
);

// Again, based on how many outputs we have
@@ -109,17 +103,17 @@ pub fn codegen_inline_asm(
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);

let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32);

llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));
llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
}
}

pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>,
ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {
(Diffs for the remaining 40 changed files are not shown here.)
