Rollup merge of rust-lang#70226 - RalfJung:checked, r=oli-obk
use checked casts and arithmetic in Miri engine

This is unfortunately pretty annoying because we have to cast back and forth between `u64` and `usize` more often than should be necessary, and that cast is considered fallible.
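
A minimal sketch of the pattern this commit moves towards (not code taken from the diff): replace silent `as` casts with explicitly checked conversions.

```rust
use std::convert::TryFrom;

fn main() {
    let size: u64 = 1 << 40;

    // Before: an `as` cast silently truncates on a 32-bit host.
    let _truncated = size as usize;

    // After: the u64 -> usize conversion is treated as fallible and checked.
    let checked =
        usize::try_from(size).expect("access too big for this host architecture");
    println!("{}", checked);
}
```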

For example, should [this](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/interpret/value/enum.ConstValue.html) really be `usize`?

Also, `LayoutDetails` uses `usize` for field indices, but in Miri we use `u64` so that we can also handle array indexing. Maybe methods like `mplace_field` should be suitably generalized to accept both `u64` and `usize`?
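
One hypothetical shape such a generalization could take (the helper name and trait bound below are illustrative, not the actual compiler API):

```rust
use std::convert::TryInto;

// Illustrative only: accept any index type that converts into `u64`, so a
// caller can pass either a `usize` field index or a `u64` array index.
fn nth_element<T>(slice: &[T], idx: impl TryInto<u64>) -> Option<&T> {
    let idx: u64 = idx.try_into().ok()?;
    let idx: usize = idx.try_into().ok()?; // fallible on 16/32-bit hosts
    slice.get(idx)
}

fn main() {
    let data = [10, 20, 30];
    assert_eq!(nth_element(&data, 1u64), Some(&20));
    assert_eq!(nth_element(&data, 2usize), Some(&30));
}
```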

r? @oli-obk Cc @eddyb
Dylan-DPC authored Mar 25, 2020
2 parents 3c1d9ad + 7400955 commit ea00fbc
Showing 24 changed files with 337 additions and 280 deletions.
65 changes: 29 additions & 36 deletions src/librustc/mir/interpret/allocation.rs
@@ -1,18 +1,20 @@
//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};

use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::HasDataLayout;

use super::{
read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
};

use crate::ty::layout::{Align, Size};

use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::HasDataLayout;
use std::borrow::Cow;
use std::iter;
use std::ops::{Deref, DerefMut, Range};

// NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
// `src/librustc_mir/interpret/snapshot.rs`.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
let bytes = slice.into().into_owned();
let size = Size::from_bytes(bytes.len() as u64);
let size = Size::from_bytes(bytes.len());
Self {
bytes,
relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
}

pub fn undef(size: Size, align: Align) -> Self {
assert_eq!(size.bytes() as usize as u64, size.bytes());
Allocation {
bytes: vec![0; size.bytes() as usize],
bytes: vec![0; size.bytes_usize()],
relocations: Relocations::new(),
undef_mask: UndefMask::new(size, false),
size,
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
pub fn len(&self) -> usize {
self.size.bytes() as usize
self.size.bytes_usize()
}

/// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -183,20 +184,15 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
#[inline]
fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
let end = offset + size; // This does overflow checking.
assert_eq!(
end.bytes() as usize as u64,
end.bytes(),
"cannot handle this access on this host architecture"
);
let end = end.bytes() as usize;
let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
assert!(
end <= self.len(),
"Out-of-bounds access at offset {}, size {} in allocation of size {}",
offset.bytes(),
size.bytes(),
self.len()
);
(offset.bytes() as usize)..end
offset.bytes_usize()..end
}

/// The last argument controls whether we error out when there are undefined
@@ -294,11 +290,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
) -> InterpResult<'tcx, &[u8]> {
assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
let offset = ptr.offset.bytes() as usize;
let offset = ptr.offset.bytes_usize();
Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
let size_with_null = Size::from_bytes((size + 1) as u64);
let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
// Go through `get_bytes` for checks and AllocationExtra hooks.
// We read the null, so we include it in the request, but we want it removed
// from the result, so we do subslicing.
@@ -343,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
let (lower, upper) = src.size_hint();
let len = upper.expect("can only write bounded iterators");
assert_eq!(lower, len, "can only write iterators with a precise length");
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
@@ -386,7 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
} else {
match self.relocations.get(&ptr.offset) {
Some(&(tag, alloc_id)) => {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
}
None => {}
@@ -433,7 +428,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
};

let bytes = match val.to_bits_or_ptr(type_size, cx) {
Err(val) => val.offset.bytes() as u128,
Err(val) => u128::from(val.offset.bytes()),
Ok(data) => data,
};

@@ -524,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
)
};
let start = ptr.offset;
let end = start + size;
let end = start + size; // `Size` addition

// Mark parts of the outermost relocations as undefined if they partially fall outside the
// given range.
@@ -563,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
#[inline]
fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.undef_mask
.is_range_defined(ptr.offset, ptr.offset + size)
.is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
.or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
}

@@ -643,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
if defined.ranges.len() <= 1 {
self.undef_mask.set_range_inbounds(
dest.offset,
dest.offset + size * repeat,
dest.offset + size * repeat, // `Size` operations
defined.initial,
);
return;
@@ -721,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
for i in 0..length {
new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
let dest_offset = dest.offset + (i * size);
let dest_offset = dest.offset + size * i; // `Size` operations
(
// shift offsets from source allocation to destination allocation
offset + dest_offset - src.offset,
(offset + dest_offset) - src.offset, // `Size` operations
reloc,
)
}));
@@ -861,18 +856,18 @@ impl UndefMask {
if amount.bytes() == 0 {
return;
}
let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
let unused_trailing_bits =
u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
if amount.bytes() > unused_trailing_bits {
let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
assert_eq!(additional_blocks as usize as u64, additional_blocks);
self.blocks.extend(
// FIXME(oli-obk): optimize this by repeating `new_state as Block`.
iter::repeat(0).take(additional_blocks as usize),
iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
);
}
let start = self.len;
self.len += amount;
self.set_range_inbounds(start, start + amount, new_state);
self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
}
}

@@ -881,7 +876,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
let bits = bits.bytes();
let a = bits / UndefMask::BLOCK_SIZE;
let b = bits % UndefMask::BLOCK_SIZE;
assert_eq!(a as usize as u64, a);
assert_eq!(b as usize as u64, b);
(a as usize, b as usize)
(usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
43 changes: 23 additions & 20 deletions src/librustc/mir/interpret/mod.rs
@@ -95,6 +95,27 @@ mod pointer;
mod queries;
mod value;

use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};

use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
use rustc_ast::ast::LitKind;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{HashMapExt, Lock};
use rustc_data_structures::tiny_list::TinyList;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_serialize::{Decodable, Encodable, Encoder};

use crate::mir;
use crate::ty::codec::TyDecoder;
use crate::ty::layout::{self, Size};
use crate::ty::subst::GenericArgKind;
use crate::ty::{self, Instance, Ty, TyCtxt};

pub use self::error::{
struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo,
InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
@@ -107,24 +128,6 @@ pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask};

pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic};

use crate::mir;
use crate::ty::codec::TyDecoder;
use crate::ty::layout::{self, Size};
use crate::ty::subst::GenericArgKind;
use crate::ty::{self, Instance, Ty, TyCtxt};
use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
use rustc_ast::ast::LitKind;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::{HashMapExt, Lock};
use rustc_data_structures::tiny_list::TinyList;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_serialize::{Decodable, Encodable, Encoder};
use std::fmt;
use std::io;
use std::num::NonZeroU32;
use std::sync::atomic::{AtomicU32, Ordering};

/// Uniquely identifies one of the following:
/// - A constant
/// - A static
@@ -264,8 +267,8 @@ impl<'s> AllocDecodingSession<'s> {
D: TyDecoder<'tcx>,
{
// Read the index of the allocation.
let idx = decoder.read_u32()? as usize;
let pos = self.state.data_offsets[idx] as usize;
let idx = usize::try_from(decoder.read_u32()?).unwrap();
let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();

// Decode the `AllocDiscriminant` now so that we know if we have to reserve an
// `AllocId`.
15 changes: 7 additions & 8 deletions src/librustc/mir/interpret/pointer.rs
@@ -62,9 +62,9 @@ pub trait PointerArithmetic: layout::HasDataLayout {
/// This should be called by all the other methods before returning!
#[inline]
fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
let val = val as u128;
let val = u128::from(val);
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1)
(u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
}

#[inline]
@@ -73,17 +73,16 @@ pub trait PointerArithmetic: layout::HasDataLayout {
self.truncate_to_ptr(res)
}

// Overflow checking only works properly on the range from -u64 to +u64.
#[inline]
fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
// FIXME: is it possible to over/underflow here?
fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
if i < 0 {
// Trickery to ensure that `i64::MIN` works fine: compute `n = -i`.
// This formula only works for true negative values; it overflows for zero!
let n = u64::MAX - (i as u64) + 1;
let res = val.overflowing_sub(n);
self.truncate_to_ptr(res)
} else {
// `i >= 0`, so the cast is safe.
self.overflowing_offset(val, i as u64)
}
}
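
// --- Not part of the diff: a standalone sketch of the `n = -i` trick used in
// --- `overflowing_signed_offset` above, showing why it also covers `i64::MIN`.
fn magnitude_of_negative(i: i64) -> u64 {
    assert!(i < 0, "the formula overflows for zero and positive values");
    // Two's complement: for negative `i`, `i as u64` equals `2^64 + i`, so
    // `u64::MAX - (i as u64) + 1` equals `-i` even when `-i` does not fit in `i64`.
    u64::MAX - (i as u64) + 1
}

fn main() {
    assert_eq!(magnitude_of_negative(-1), 1);
    assert_eq!(magnitude_of_negative(i64::MIN), 1u64 << 63);
}
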
@@ -96,7 +95,7 @@

#[inline]
fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
let (res, over) = self.overflowing_signed_offset(val, i);
if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
}
}
@@ -189,14 +188,14 @@ impl<'tcx, Tag> Pointer<Tag> {
}

#[inline]
pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
}

#[inline(always)]
pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
self.overflowing_signed_offset(i128::from(i), cx).0
self.overflowing_signed_offset(i, cx).0
}

#[inline(always)]