Skip to content

Commit

Permalink
LLVM value address space optimizations: leave values in their original address space where possible.
Browse files Browse the repository at this point in the history
  • Loading branch information
DiamondLovesYou committed Jan 21, 2019
1 parent e041b4a commit 1019e20
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 36 deletions.
4 changes: 2 additions & 2 deletions src/librustc_codegen_llvm/abi.rs
Original file line number Diff line number Diff line change
Expand Up @@ -652,7 +652,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
llargument_tys.push(cx.type_ptr_to_flat(self.ret.memory_ty(cx)));
llargument_tys.push(cx.type_ptr_to_alloca(self.ret.memory_ty(cx)));
cx.type_void()
}
};
Expand Down Expand Up @@ -682,7 +682,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(_, None) => cx.type_ptr_to_flat(arg.memory_ty(cx)),
PassMode::Indirect(_, None) => cx.type_ptr_to_alloca(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}
Expand Down
19 changes: 10 additions & 9 deletions src/librustc_codegen_ssa/mir/block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -263,7 +263,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let scratch =
PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret");
PlaceRef::alloca_addr_space(&mut bx, self.fn_ty.ret.layout,
"ret");
op.val.store(&mut bx, scratch);
scratch.llval
}
Expand Down Expand Up @@ -791,7 +792,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Immediate(_) | Pair(..) => {
match arg.mode {
PassMode::Indirect(..) | PassMode::Cast(_) => {
let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
let scratch = PlaceRef::alloca_addr_space(bx, arg.layout, "arg");
op.val.store(bx, scratch);
(scratch.llval, scratch.align, true)
}
Expand All @@ -806,12 +807,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.

let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
let scratch = PlaceRef::alloca_addr_space(bx, arg.layout, "arg");
base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align,
op.layout, MemFlags::empty());
(scratch.llval, scratch.align, true)
} else {
(llval, align, true)
(bx.flat_addr_cast(llval), align, true)
}
}
};
Expand Down Expand Up @@ -883,7 +884,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
cx.tcx().types.i32
]));
let slot = PlaceRef::alloca(bx, layout, "personalityslot");
let slot = PlaceRef::alloca_addr_space(bx, layout, "personalityslot");
self.personality_slot = Some(slot);
slot
}
Expand Down Expand Up @@ -979,15 +980,15 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
return if fn_ret.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
let tmp = PlaceRef::alloca_addr_space(bx, fn_ret.layout, "tmp_ret");
tmp.storage_live(bx);
llargs.push(tmp.llval);
ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
let tmp = PlaceRef::alloca_addr_space(bx, fn_ret.layout, "tmp_ret");
tmp.storage_live(bx);
ReturnDest::IndirectOperand(tmp, index)
} else {
Expand Down Expand Up @@ -1031,7 +1032,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
LocalRef::Operand(None) => {
let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst));
assert!(!dst_layout.ty.has_erasable_regions());
let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
let place = PlaceRef::alloca_addr_space(bx, dst_layout, "transmute_temp");
place.storage_live(bx);
self.codegen_transmute_into(bx, src, place);
let op = bx.load_operand(place);
Expand Down Expand Up @@ -1084,7 +1085,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if let PassMode::Cast(_) = ret_ty.mode {
let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
let tmp = PlaceRef::alloca_addr_space(bx, ret_ty.layout, "tmp_ret");
tmp.storage_live(bx);
bx.store_arg_ty(&ret_ty, llval, tmp);
let op = bx.load_operand(tmp);
Expand Down
9 changes: 5 additions & 4 deletions src/librustc_codegen_ssa/mir/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// FIXME: add an appropriate debuginfo
LocalRef::UnsizedPlace(indirect_place)
} else {
let place = PlaceRef::alloca(&mut bx, layout, &name.as_str());
let place = PlaceRef::alloca_addr_space(&mut bx, layout, &name.as_str());
if dbg {
let (scope, span) = fx.debug_loc(mir::SourceInfo {
span: decl.source_info.span,
Expand Down Expand Up @@ -305,7 +305,8 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
);
LocalRef::UnsizedPlace(indirect_place)
} else {
LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local)))
LocalRef::Place(PlaceRef::alloca_addr_space(&mut bx, layout,
&format!("{:?}", local)))
}
} else {
// If this is an immediate local, we do not create an
Expand Down Expand Up @@ -468,7 +469,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
_ => bug!("spread argument isn't a tuple?!")
};

let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name);
let place = PlaceRef::alloca_addr_space(bx, bx.layout_of(arg_ty), &name);
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_ty.args[idx];
idx += 1;
Expand Down Expand Up @@ -559,7 +560,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
indirect_operand.store(bx, tmp);
tmp
} else {
let tmp = PlaceRef::alloca(bx, arg.layout, &name);
let tmp = PlaceRef::alloca_addr_space(bx, arg.layout, &name);
bx.store_fn_arg(arg, &mut llarg_idx, tmp);
tmp
};
Expand Down
2 changes: 1 addition & 1 deletion src/librustc_codegen_ssa/mir/place.rs
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
let ptr_layout = bx.cx().layout_of(ptr_ty);
Self::alloca(bx, ptr_layout, name)
Self::alloca_addr_space(bx, ptr_layout, name)
}

pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
Expand Down
55 changes: 35 additions & 20 deletions src/librustc_codegen_ssa/mir/rvalue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,36 @@ use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;

/// Normalizes two pointer operands that may live in different LLVM address
/// spaces so they can be compared with a single `icmp`.
///
/// Two fixups are applied in order:
/// 1. If either operand is a null constant, it is rebuilt as a null of the
///    *other* operand's type, so a `null_mut()` literal adopts the address
///    space of the value it is compared against.
/// 2. If the operands are pointer-typed (`type_addr_space` is `Some`), both
///    are cast into the flat address space so the comparison is well-typed.
///
/// Returns the (possibly rewritten) `(lhs, rhs)` pair.
fn codegen_binop_fixup<'a, 'tcx: 'a, Bx>(bx: &mut Bx,
                                         lhs: Bx::Value,
                                         rhs: Bx::Value)
    -> (Bx::Value, Bx::Value)
    where Bx: BuilderMethods<'a, 'tcx>,
{
    // In case we're in separate addr spaces.
    // Can happen when cmp against null_mut, eg.
    // `infer-addr-spaces` should propagate.
    // But, empirically, `infer-addr-spaces` doesn't.
    //
    // NOTE(review): this detects "is `val` the null constant" by comparing
    // against a freshly-built `const_null(this_ty)` — presumably backend
    // constants are uniqued so equality holds for the same (type, value)
    // pair; confirm against the `ConstMethods` implementation.
    let fix_null_ty = |val, this_ty, other_ty| {
        if bx.cx().const_null(this_ty) == val {
            // Rebuild the null in the other operand's type (and hence its
            // address space) so both sides agree.
            bx.cx().const_null(other_ty)
        } else {
            val
        }
    };
    let lhs_ty = bx.cx().val_ty(lhs);
    let rhs_ty = bx.cx().val_ty(rhs);
    let lhs = fix_null_ty(lhs, lhs_ty, rhs_ty);
    let rhs = fix_null_ty(rhs, rhs_ty, lhs_ty);
    // NOTE(review): the addr-space check below uses the types captured
    // *before* the null fixup — presumably intentional, since the fixup only
    // swaps one null's type for the other operand's; verify.
    if bx.cx().type_addr_space(lhs_ty).is_some() {
        // If one side is a pointer, the other must be too.
        assert!(bx.cx().type_addr_space(rhs_ty).is_some());
        // Cast both into the flat address space so `icmp` sees one type.
        (bx.flat_addr_cast(lhs),
         bx.flat_addr_cast(rhs))
    } else {
        // Non-pointer operands need no fixup.
        (lhs, rhs)
    }
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_rvalue(
&mut self,
Expand Down Expand Up @@ -63,7 +93,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp");
let scratch = PlaceRef::alloca_addr_space(&mut bx, operand.layout,
"__unsize_temp");
scratch.storage_live(&mut bx);
operand.val.store(&mut bx, scratch);
base::coerce_unsized_into(&mut bx, scratch, dest);
Expand All @@ -89,7 +120,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
let zero = bx.cx().const_usize(0);
let start = dest.project_index(&mut bx, zero).llval;
let start = bx.flat_addr_cast(start);

if let OperandValue::Immediate(v) = cg_elem.val {
let size = bx.cx().const_usize(dest.layout.size.bytes());
Expand All @@ -111,6 +141,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

let count = bx.cx().const_usize(count);
let end = dest.project_index(&mut bx, count).llval;
let start = bx.flat_addr_cast(start);
let end = bx.flat_addr_cast(end);

let mut header_bx = bx.build_sibling_block("repeat_loop_header");
Expand Down Expand Up @@ -245,7 +276,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// until LLVM removes pointee types.
let lldata = bx.pointercast(lldata,
bx.cx().scalar_pair_element_backend_type(cast, 0, true));
let lldata = bx.flat_addr_cast(lldata);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
Expand Down Expand Up @@ -618,17 +648,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
lhs, rhs
)
} else {
// In case we're in separate addr spaces.
// Can happen when cmp against null_mut, eg.
// `infer-addr-spaces` should propagate.
let lhs_ty = bx.cx().val_ty(rhs);
let (lhs, rhs) = if bx.cx().type_addr_space(lhs_ty).is_some() {
assert!(bx.cx().type_addr_space(bx.cx().val_ty(rhs)).is_some());
(bx.flat_addr_cast(lhs),
bx.flat_addr_cast(rhs))
} else {
(lhs, rhs)
};
let (lhs, rhs) = codegen_binop_fixup(bx, lhs, rhs);
bx.icmp(
base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
lhs, rhs
Expand All @@ -647,12 +667,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
rhs_extra: Bx::Value,
_input_ty: Ty<'tcx>,
) -> Bx::Value {
// In case we're in separate addr spaces.
// Can happen when cmp against null_mut, eg.
// `infer-addr-spaces` should propagate.
let lhs_addr = bx.flat_addr_cast(lhs_addr);
let rhs_addr = bx.flat_addr_cast(rhs_addr);

let (lhs_addr, rhs_addr) = codegen_binop_fixup(bx, lhs_addr, rhs_addr);
match op {
mir::BinOp::Eq => {
let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
Expand Down

0 comments on commit 1019e20

Please sign in to comment.