Some small cleanups in rustc_abi #116313

Merged
merged 4 commits on Oct 2, 2023
76 changes: 38 additions & 38 deletions compiler/rustc_abi/src/layout.rs
@@ -53,32 +53,32 @@ pub trait LayoutCalculator {
kind: StructKind,
) -> Option<LayoutS> {
let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
// Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
// data-carrying variants can be packed into the space after/before the niche.
// Enums prefer niches close to the beginning or the end of the variants so that other
// (smaller) data-carrying variants can be packed into the space after/before the niche.
// If the default field ordering does not give us a niche at the front then we do a second
// run and bias niches to the right and then check which one is closer to one of the struct's
// edges.
// run and bias niches to the right and then check which one is closer to one of the
// struct's edges.
if let Some(layout) = &layout {
// Don't try to calculate an end-biased layout for unsizable structs,
// otherwise we could end up with different layouts for
// Foo<Type> and Foo<dyn Trait> which would break unsizing
// Foo<Type> and Foo<dyn Trait> which would break unsizing.
if !matches!(kind, StructKind::MaybeUnsized) {
if let Some(niche) = layout.largest_niche {
let head_space = niche.offset.bytes();
let niche_length = niche.value.size(dl).bytes();
let tail_space = layout.size.bytes() - head_space - niche_length;
let niche_len = niche.value.size(dl).bytes();
let tail_space = layout.size.bytes() - head_space - niche_len;

// This may end up doing redundant work if the niche is already in the last field
// (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
// the unpadded size so we try anyway.
// This may end up doing redundant work if the niche is already in the last
// field (e.g. a trailing bool) and there is tail padding. But it's non-trivial
// to get the unpadded size so we try anyway.
if fields.len() > 1 && head_space != 0 && tail_space > 0 {
let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
.expect("alt layout should always work");
let niche = alt_layout
let alt_niche = alt_layout
.largest_niche
.expect("alt layout should have a niche like the regular one");
let alt_head_space = niche.offset.bytes();
let alt_niche_len = niche.value.size(dl).bytes();
let alt_head_space = alt_niche.offset.bytes();
let alt_niche_len = alt_niche.value.size(dl).bytes();
let alt_tail_space =
alt_layout.size.bytes() - alt_head_space - alt_niche_len;

@@ -93,7 +93,7 @@ pub trait LayoutCalculator {
alt_layout: {}\n",
layout.size.bytes(),
head_space,
niche_length,
niche_len,
tail_space,
alt_head_space,
alt_niche_len,
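For context, a hypothetical example (not part of this change) of why an edge niche matters:

// `Big` is 7 bytes and its `bool` supplies the niche. If layout puts that bool
// at an edge of the variant, the 4-byte payload of `Small` fits entirely in the
// bytes outside the niche, so the enum needs no separate tag byte. Whether the
// compiler picks this exact layout is unspecified; this only sketches the idea.
enum Example {
    Big([u8; 6], bool),
    Small(u32),
}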
@@ -684,7 +684,8 @@ pub trait LayoutCalculator {
// Also do not overwrite any already existing "clever" ABIs.
if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
variant.abi = abi;
// Also need to bump up the size and alignment, so that the entire value fits in here.
// Also need to bump up the size and alignment, so that the entire value fits
// in here.
variant.size = cmp::max(variant.size, size);
variant.align.abi = cmp::max(variant.align.abi, align.abi);
}
@@ -868,15 +869,15 @@ fn univariant(

// If `-Z randomize-layout` was enabled for the type definition we can shuffle
// the field ordering to try and catch some code making assumptions about layouts
// we don't guarantee
// we don't guarantee.
if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
#[cfg(feature = "randomize")]
{
// `ReprOptions.layout_seed` is a deterministic seed that we can use to
// randomize field ordering with
// `ReprOptions.layout_seed` is a deterministic seed we can use to randomize field
// ordering.
let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());

// Shuffle the ordering of the fields
// Shuffle the ordering of the fields.
optimizing.shuffle(&mut rng);
}
// Otherwise we just leave things alone and actually optimize the type's fields
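A standalone sketch of the seeded shuffle (assumes compatible versions of the `rand` and `rand_xoshiro` crates; `shuffled_field_order` is an illustrative name, not compiler code):

use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_xoshiro::Xoshiro128StarStar;

fn shuffled_field_order(field_count: usize, seed: u64) -> Vec<usize> {
    let mut order: Vec<usize> = (0..field_count).collect();
    // The seed is derived from the type, so the "random" order is stable
    // across builds of the same crate.
    let mut rng = Xoshiro128StarStar::seed_from_u64(seed);
    order.shuffle(&mut rng);
    order
}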
@@ -892,27 +893,26 @@ fn univariant(
.max()
.unwrap_or(0);

// Calculates a sort key to group fields by their alignment or possibly some size-derived
// pseudo-alignment.
// Calculates a sort key to group fields by their alignment or possibly some
// size-derived pseudo-alignment.
let alignment_group_key = |layout: Layout<'_>| {
if let Some(pack) = pack {
// return the packed alignment in bytes
// Return the packed alignment in bytes.
layout.align().abi.min(pack).bytes()
} else {
// returns log2(effective-align).
// This is ok since `pack` applies to all fields equally.
// The calculation assumes that size is an integer multiple of align, except for ZSTs.
//
// Returns `log2(effective-align)`. This is ok since `pack` applies to all
// fields equally. The calculation assumes that size is an integer multiple of
// align, except for ZSTs.
let align = layout.align().abi.bytes();
let size = layout.size().bytes();
let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
// group [u8; 4] with align-4 or [u8; 6] with align-2 fields
// Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
let size_as_align = align.max(size).trailing_zeros();
let size_as_align = if largest_niche_size > 0 {
match niche_bias {
// Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
// to the front in the first case (for aligned loads) but keep the bool in front
// in the second case for its niches.
// Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the
// array to the front in the first case (for aligned loads) but keep
// the bool in front in the second case for its niches.
NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
// When moving niches towards the end of the struct then for
// A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
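A simplified standalone version of the grouping key described above (operates on plain numbers rather than `Layout` values; illustrative only):

fn alignment_group_key(align_bytes: u64, size_bytes: u64, pack: Option<u64>) -> u64 {
    match pack {
        // Under `repr(packed(N))` every field's effective alignment is capped at N,
        // so the packed alignment in bytes is used directly.
        Some(pack) => align_bytes.min(pack),
        // Otherwise group by log2 of the effective alignment, letting size stand in
        // for alignment so that e.g. `[u8; 4]` groups with align-4 fields.
        None => u64::from(align_bytes.max(size_bytes).trailing_zeros()),
    }
}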
@@ -931,14 +931,14 @@ fn univariant(

match kind {
StructKind::AlwaysSized | StructKind::MaybeUnsized => {
// Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
// to get one niche into the preferred position. If it ever supported multiple niches
// then a more advanced pick-and-pack approach could provide better results.
// But even for the single-niche cache it's not optimal. E.g. for
// A(u32, (bool, u8), u16) it would be possible to move the bool to the front
// but it would require packing the tuple together with the u16 to build a 4-byte
// group so that the u32 can be placed after it without padding. This kind
// of packing can't be achieved by sorting.
// Currently `LayoutS` only exposes a single niche so sorting is usually
// sufficient to get one niche into the preferred position. If it ever
// supported multiple niches then a more advanced pick-and-pack approach could
// provide better results. But even for the single-niche cache it's not
// optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to move the
// bool to the front but it would require packing the tuple together with the
// u16 to build a 4-byte group so that the u32 can be placed after it without
// padding. This kind of packing can't be achieved by sorting.
optimizing.sort_by_key(|&x| {
let f = fields[x];
let field_size = f.size().bytes();
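The observable effect of this sorting, as a hedged example (default Rust layout is unspecified, so the exact numbers are not guaranteed):

// On current rustc the fields get grouped by alignment, so this struct takes
// 8 bytes; a strict declaration-order layout (`#[repr(C)]`) would need 12.
struct Reordered {
    a: u8,
    b: u32,
    c: u16,
    d: u8,
}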
69 changes: 36 additions & 33 deletions compiler/rustc_abi/src/lib.rs
@@ -53,10 +53,11 @@ bitflags! {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub enum IntegerType {
/// Pointer sized integer type, i.e. isize and usize. The field shows signedness, that
/// is, `Pointer(true)` is isize.
/// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
/// `Pointer(true)` means `isize`.
Pointer(bool),
/// Fix sized integer type, e.g. i8, u32, i128 The bool field shows signedness, `Fixed(I8, false)` means `u8`
/// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
/// `Fixed(I8, false)` means `u8`.
Fixed(Integer, bool),
}
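For quick reference, the mapping spelled out by these doc comments (illustrative values only):

// IntegerType::Pointer(true)              -> isize
// IntegerType::Pointer(false)             -> usize
// IntegerType::Fixed(Integer::I8, false)  -> u8
// IntegerType::Fixed(Integer::I32, true)  -> i32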

@@ -69,7 +70,7 @@ impl IntegerType {
}
}

/// Represents the repr options provided by the user,
/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]
pub struct ReprOptions {
@@ -139,7 +140,7 @@ impl ReprOptions {
}

/// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
/// was enabled for its declaration crate
/// was enabled for its declaration crate.
pub fn can_randomize_type_layout(&self) -> bool {
!self.inhibit_struct_field_reordering_opt()
&& self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
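Illustrative examples of the eligibility check (the struct names are made up):

#[repr(C)]
struct Ffi { a: u8, b: u32 }   // repr(C) inhibits reordering, so never randomized

struct Plain { a: u8, b: u32 } // default repr: eligible for `-Z randomize-layout`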
@@ -217,7 +218,8 @@ pub enum TargetDataLayoutErrors<'a> {
}

impl TargetDataLayout {
/// Parse data layout from an [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
/// Parse data layout from an
/// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
///
/// This function doesn't fill `c_enum_min_size` and it will always be `I32` since it can not be
/// determined from llvm string.
@@ -242,10 +244,11 @@ impl TargetDataLayout {
};

// Parse a size string.
let size = |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);
let parse_size =
|s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

// Parse an alignment string.
let align = |s: &[&'a str], cause: &'a str| {
let parse_align = |s: &[&'a str], cause: &'a str| {
if s.is_empty() {
return Err(TargetDataLayoutErrors::MissingAlignment { cause });
}
@@ -269,22 +272,22 @@ impl TargetDataLayout {
[p] if p.starts_with('P') => {
dl.instruction_address_space = parse_address_space(&p[1..], "P")?
}
["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
["a", ref a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
["f32", ref a @ ..] => dl.f32_align = parse_align(a, "f32")?,
["f64", ref a @ ..] => dl.f64_align = parse_align(a, "f64")?,
// FIXME(erikdesjardins): we should be parsing nonzero address spaces
// this will require replacing TargetDataLayout::{pointer_size,pointer_align}
// with e.g. `fn pointer_size_in(AddressSpace)`
[p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
dl.pointer_size = size(s, p)?;
dl.pointer_align = align(a, p)?;
dl.pointer_size = parse_size(s, p)?;
dl.pointer_align = parse_align(a, p)?;
}
[s, ref a @ ..] if s.starts_with('i') => {
let Ok(bits) = s[1..].parse::<u64>() else {
size(&s[1..], "i")?; // For the user error.
parse_size(&s[1..], "i")?; // For the user error.
continue;
};
let a = align(a, s)?;
let a = parse_align(a, s)?;
match bits {
1 => dl.i1_align = a,
8 => dl.i8_align = a,
@@ -301,8 +304,8 @@ impl TargetDataLayout {
}
}
[s, ref a @ ..] if s.starts_with('v') => {
let v_size = size(&s[1..], "v")?;
let a = align(a, s)?;
let v_size = parse_size(&s[1..], "v")?;
let a = parse_align(a, s)?;
if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
v.1 = a;
continue;
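A simplified, standalone sketch of parsing one `i<bits>:<align>` spec from an LLVM data layout string such as `e-m:e-i64:64-f80:128-n8:16:32:64-S128` (real parsing also handles preferred alignments, pointers, vectors, and so on):

fn parse_int_spec(spec: &str) -> Option<(u64, u64)> {
    let mut parts = spec.split(':');
    let bits = parts.next()?.strip_prefix('i')?.parse::<u64>().ok()?;
    let abi_align_bits = parts.next()?.parse::<u64>().ok()?;
    Some((bits, abi_align_bits)) // e.g. "i64:64" -> (64, 64)
}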
@@ -747,7 +750,6 @@ impl Align {
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]

pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
@@ -773,7 +775,6 @@ impl AbiAndPrefAlign {
/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable, Decodable, HashStable_Generic))]

pub enum Integer {
I8,
I16,
@@ -937,8 +938,7 @@ impl Primitive {
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`,
/// followed by `0..=end`.
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means following
/// sequence:
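A standalone sketch of the wrap-around membership test implied by this doc comment (the real type also clamps values to the scalar's size):

fn wrapping_range_contains(start: u128, end: u128, v: u128) -> bool {
    if start <= end {
        start <= v && v <= end // ordinary range, e.g. 10..=20
    } else {
        // wrapped range: for an 8-bit value, 254..=2 accepts 254, 255, 0, 1, 2
        v >= start || v <= end
    }
}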
@@ -970,21 +970,21 @@ impl WrappingRange {

/// Returns `self` with replaced `start`
#[inline(always)]
pub fn with_start(mut self, start: u128) -> Self {
fn with_start(mut self, start: u128) -> Self {
self.start = start;
self
}

/// Returns `self` with replaced `end`
#[inline(always)]
pub fn with_end(mut self, end: u128) -> Self {
fn with_end(mut self, end: u128) -> Self {
self.end = end;
self
}

/// Returns `true` if `size` completely fills the range.
#[inline]
pub fn is_full_for(&self, size: Size) -> bool {
fn is_full_for(&self, size: Size) -> bool {
let max_value = size.unsigned_int_max();
debug_assert!(self.start <= max_value && self.end <= max_value);
self.start == (self.end.wrapping_add(1) & max_value)
@@ -1066,15 +1066,17 @@ impl Scalar {
}

#[inline]
/// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
/// Allows the caller to mutate the valid range. This operation will panic if attempted on a
/// union.
pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
match self {
Scalar::Initialized { valid_range, .. } => valid_range,
Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
}
}

/// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
/// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole
/// layout.
#[inline]
pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
match *self {
@@ -1252,7 +1254,6 @@ impl AddressSpace {
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]

pub enum Abi {
Uninhabited,
Scalar(Scalar),
@@ -1457,17 +1458,19 @@ impl Niche {
return None;
}

// Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
// Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
// This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
// Having `None` in niche zero can enable some special optimizations.
// Extend the range of valid values being reserved by moving either `v.start` or `v.end`
// bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy
// the niche of zero. This is accomplished by preferring enums with 2 variants(`count==1`)
// and always taking the shortest path to niche zero. Having `None` in niche zero can
// enable some special optimizations.
//
// Bound selection criteria:
// 1. Select closest to zero given wrapping semantics.
// 2. Avoid moving past zero if possible.
//
// In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
// If niche zero is already reserved, the selection of bounds are of little interest.
// In practice this means that enums with `count > 1` are unlikely to claim niche zero,
// since they have to fit perfectly. If niche zero is already reserved, the selection of
// bounds are of little interest.
let move_start = |v: WrappingRange| {
let start = v.start.wrapping_sub(count) & max_value;
Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
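A concrete instance of the "niche zero" optimization mentioned above, using a guarantee the standard library documents for `Option` around `NonZero` types:

use core::mem::size_of;
use core::num::NonZeroU8;

// `NonZeroU8` reserves the value 0, so `None` can be stored in that niche and
// the `Option` costs no extra space.
const _: () = assert!(size_of::<Option<NonZeroU8>>() == size_of::<NonZeroU8>());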