diff --git a/.github/workflows/flux.yml b/.github/workflows/flux.yml
index b6ede18e581cf..b31cf19056a69 100644
--- a/.github/workflows/flux.yml
+++ b/.github/workflows/flux.yml
@@ -9,7 +9,7 @@ on:
env:
FIXPOINT_VERSION: "556104ba5508891c357b0bdf819ce706e93d9349"
- FLUX_VERSION: "a17246965a8752e3d3d4e3559865311048bb61f7"
+ FLUX_VERSION: "b0cec81c42bc6e210f675b46dd5b4b16774b0d0e"
jobs:
check-flux-on-core:
diff --git a/library/Cargo.lock b/library/Cargo.lock
index 656576d2d8e56..8b860f6949229 100644
--- a/library/Cargo.lock
+++ b/library/Cargo.lock
@@ -140,9 +140,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.174"
+version = "0.2.175"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776"
+checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
dependencies = [
"rustc-std-workspace-core",
]
@@ -169,9 +169,9 @@ dependencies = [
[[package]]
name = "object"
-version = "0.37.2"
+version = "0.37.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3e3d0a7419f081f4a808147e845310313a39f322d7ae1f996b7f001d6cbed04"
+checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe"
dependencies = [
"memchr",
"rustc-std-workspace-alloc",
@@ -192,7 +192,6 @@ name = "panic_unwind"
version = "0.0.0"
dependencies = [
"alloc",
- "cfg-if",
"libc",
"rustc-std-workspace-core",
"unwind",
@@ -336,9 +335,9 @@ dependencies = [
name = "std_detect"
version = "0.1.5"
dependencies = [
- "alloc",
- "core",
"libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
]
[[package]]
diff --git a/library/Cargo.toml b/library/Cargo.toml
index a79c17fc4f7e3..e30e624094285 100644
--- a/library/Cargo.toml
+++ b/library/Cargo.toml
@@ -59,4 +59,3 @@ rustflags = ["-Cpanic=abort"]
rustc-std-workspace-core = { path = 'rustc-std-workspace-core' }
rustc-std-workspace-alloc = { path = 'rustc-std-workspace-alloc' }
rustc-std-workspace-std = { path = 'rustc-std-workspace-std' }
-compiler_builtins = { path = "compiler-builtins/compiler-builtins" }
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index c9b98fa4e5a9b..76630a746dd26 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -17,6 +17,7 @@ unsafe extern "Rust" {
#[rustc_allocator]
#[rustc_nounwind]
#[rustc_std_internal_symbol]
+ #[rustc_allocator_zeroed_variant = "__rust_alloc_zeroed"]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
#[rustc_deallocator]
#[rustc_nounwind]
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index c4e599222e501..8b6d86a288866 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -40,30 +40,15 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
/// An ordered map based on a [B-Tree].
///
-/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
-/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
-/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
-/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
-/// is done is *very* inefficient for modern computer architectures. In particular, every element
-/// is stored in its own individually heap-allocated node. This means that every single insertion
-/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
-/// are both notably expensive things to do in practice, we are forced to, at the very least,
-/// reconsider the BST strategy.
-///
-/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
-/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
-/// searches. However, this does mean that searches will have to do *more* comparisons on average.
-/// The precise number of comparisons depends on the node search strategy used. For optimal cache
-/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
-/// the node using binary search. As a compromise, one could also perform a linear search
-/// that initially only checks every ith element for some choice of i.
+/// Given a key type with a [total order], an ordered map stores its entries in key order.
+/// That means that keys must be of a type that implements the [`Ord`] trait,
+/// such that two keys can always be compared to determine their [`Ordering`].
+/// Examples of keys with a total order are strings with lexicographical order,
+/// and numbers with their natural order.
///
-/// Currently, our implementation simply performs naive linear search. This provides excellent
-/// performance on *small* nodes of elements which are cheap to compare. However in the future we
-/// would like to further explore choosing the optimal search strategy based on the choice of B,
-/// and possibly other factors. Using linear search, searching for a random element is expected
-/// to take B * log(n) comparisons, which is generally worse than a BST. In practice,
-/// however, performance is excellent.
+/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::into_iter`], [`BTreeMap::values`], or
+/// [`BTreeMap::keys`] produce their items in key order, and take worst-case logarithmic and
+/// amortized constant time per item returned.
///
/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is
@@ -72,14 +57,6 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
/// `BTreeMap` that observed the logic error and not result in undefined behavior. This could
/// include panics, incorrect results, aborts, memory leaks, and non-termination.
///
-/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::into_iter`], [`BTreeMap::values`], or
-/// [`BTreeMap::keys`] produce their items in order by key, and take worst-case logarithmic and
-/// amortized constant time per item returned.
-///
-/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree
-/// [`Cell`]: core::cell::Cell
-/// [`RefCell`]: core::cell::RefCell
-///
/// # Examples
///
/// ```
@@ -169,6 +146,43 @@ pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
/// // modify an entry before an insert with in-place mutation
/// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100);
/// ```
+///
+/// # Background
+///
+/// A B-tree is similar to a [binary search tree], but adapted to the natural granularity at
+/// which modern machines like to consume data. This means that each node contains an entire
+/// array of elements, instead of just a single element.
+///
+/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
+/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
+/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum number of
+/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
+/// is done is *very* inefficient for modern computer architectures. In particular, every element
+/// is stored in its own individually heap-allocated node. This means that every single insertion
+/// triggers a heap-allocation, and every comparison is a potential cache-miss due to the indirection.
+/// Since both heap-allocations and cache-misses are notably expensive in practice, we are forced to,
+/// at the very least, reconsider the BST strategy.
+///
+/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
+/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
+/// searches. However, this does mean that searches will have to do *more* comparisons on average.
+/// The precise number of comparisons depends on the node search strategy used. For optimal cache
+/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
+/// the node using binary search. As a compromise, one could also perform a linear search
+/// that initially only checks every ith element for some choice of i.
+///
+/// Currently, our implementation simply performs naive linear search. This provides excellent
+/// performance on *small* nodes of elements which are cheap to compare. However in the future we
+/// would like to further explore choosing the optimal search strategy based on the choice of B,
+/// and possibly other factors. Using linear search, searching for a random element is expected
+/// to take B * log(n) comparisons, which is generally worse than a BST. In practice,
+/// however, performance is excellent.
+///
+/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree
+/// [binary search tree]: https://en.wikipedia.org/wiki/Binary_search_tree
+/// [total order]: https://en.wikipedia.org/wiki/Total_order
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeMap")]
#[rustc_insignificant_dtor]
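The key-order guarantee moved to the top of the docs above is directly observable:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::new();
    map.insert(3, "c");
    map.insert(1, "a");
    map.insert(2, "b");
    // Iterators yield entries in key order, regardless of insertion order.
    let keys: Vec<i32> = map.keys().copied().collect();
    assert_eq!(keys, [1, 2, 3]);
}
```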
diff --git a/library/core/src/array/mod.rs b/library/core/src/array/mod.rs
index b3a498570f95a..bb75ec74c8177 100644
--- a/library/core/src/array/mod.rs
+++ b/library/core/src/array/mod.rs
@@ -621,11 +621,11 @@ impl<T, const N: usize> [T; N] {
/// assert_eq!(strings.len(), 3);
/// ```
#[stable(feature = "array_methods", since = "1.77.0")]
- #[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")]
+ #[rustc_const_stable(feature = "const_array_each_ref", since = "CURRENT_RUSTC_VERSION")]
pub const fn each_ref(&self) -> [&T; N] {
let mut buf = [null::<T>(); N];
- // FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
+ // FIXME(const_trait_impl): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
let mut i = 0;
while i < N {
buf[i] = &raw const self[i];
@@ -652,11 +652,11 @@ impl<T, const N: usize> [T; N] {
/// assert_eq!(floats, [0.0, 2.7, -1.0]);
/// ```
#[stable(feature = "array_methods", since = "1.77.0")]
- #[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")]
+ #[rustc_const_stable(feature = "const_array_each_ref", since = "CURRENT_RUSTC_VERSION")]
pub const fn each_mut(&mut self) -> [&mut T; N] {
let mut buf = [null_mut::<T>(); N];
- // FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
+ // FIXME(const_trait_impl): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
let mut i = 0;
while i < N {
buf[i] = &raw mut self[i];
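With the attribute swapped to `rustc_const_stable`, `each_ref` and `each_mut` become callable in const contexts. A small sketch, assuming a toolchain where this stabilization has landed (the `sum3` helper is illustrative):

```rust
// Illustrative helper: `each_ref` inside a const fn after stabilization.
const fn sum3(arr: &[u32; 3]) -> u32 {
    let [a, b, c] = arr.each_ref();
    *a + *b + *c
}

fn main() {
    const TOTAL: u32 = sum3(&[1, 2, 3]);
    assert_eq!(TOTAL, 6);
}
```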
diff --git a/library/core/src/char/methods.rs b/library/core/src/char/methods.rs
index e2ccb64415f4f..86b9cc58a0f41 100644
--- a/library/core/src/char/methods.rs
+++ b/library/core/src/char/methods.rs
@@ -1874,28 +1874,33 @@ pub const unsafe fn encode_utf8_raw_unchecked(code: u32, dst: *mut u8) {
// SAFETY: The caller must guarantee that the buffer pointed to by `dst`
// is at least `len` bytes long.
unsafe {
- match len {
- 1 => {
- *dst = code as u8;
- }
- 2 => {
- *dst = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
- *dst.add(1) = (code & 0x3F) as u8 | TAG_CONT;
- }
- 3 => {
- *dst = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
- *dst.add(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT;
- *dst.add(2) = (code & 0x3F) as u8 | TAG_CONT;
- }
- 4 => {
- *dst = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
- *dst.add(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT;
- *dst.add(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT;
- *dst.add(3) = (code & 0x3F) as u8 | TAG_CONT;
- }
- // SAFETY: `char` always takes between 1 and 4 bytes to encode in UTF-8.
- _ => crate::hint::unreachable_unchecked(),
+ if len == 1 {
+ *dst = code as u8;
+ return;
+ }
+
+ let last1 = (code >> 0 & 0x3F) as u8 | TAG_CONT;
+ let last2 = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ let last3 = (code >> 12 & 0x3F) as u8 | TAG_CONT;
+ let last4 = (code >> 18 & 0x3F) as u8 | TAG_FOUR_B;
+
+ if len == 2 {
+ *dst = last2 | TAG_TWO_B;
+ *dst.add(1) = last1;
+ return;
}
+
+ if len == 3 {
+ *dst = last3 | TAG_THREE_B;
+ *dst.add(1) = last2;
+ *dst.add(2) = last1;
+ return;
+ }
+
+ *dst = last4;
+ *dst.add(1) = last3;
+ *dst.add(2) = last2;
+ *dst.add(3) = last1;
}
}
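The restructured encoder emits the same bytes as the safe `char::encode_utf8` API; a quick cross-check against known encodings:

```rust
fn main() {
    let mut buf = [0u8; 4];
    // U+00E9 ('é') uses the two-byte form: 0xC3 0xA9.
    assert_eq!('\u{e9}'.encode_utf8(&mut buf).as_bytes(), [0xC3, 0xA9]);
    // U+1F980 ('🦀') uses the full four-byte form.
    assert_eq!('\u{1f980}'.encode_utf8(&mut buf).as_bytes(), [0xF0, 0x9F, 0xA6, 0x80]);
}
```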
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs
index a64fade285bf2..ab018fa267502 100644
--- a/library/core/src/cmp.rs
+++ b/library/core/src/cmp.rs
@@ -1554,6 +1554,9 @@ pub fn min<T: Ord>(v1: T, v2: T) -> T {
///
/// Returns the first argument if the comparison determines them to be equal.
///
+/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is
+/// always passed as the first argument and `v2` as the second.
+///
/// # Examples
///
/// ```
@@ -1574,7 +1577,7 @@ pub fn min<T: Ord>(v1: T, v2: T) -> T {
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
pub fn min_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
- if compare(&v2, &v1).is_lt() { v2 } else { v1 }
+ if compare(&v1, &v2).is_le() { v1 } else { v2 }
}
/// Returns the element that gives the minimum value from the specified function.
@@ -1646,6 +1649,9 @@ pub fn max<T: Ord>(v1: T, v2: T) -> T {
///
/// Returns the second argument if the comparison determines them to be equal.
///
+/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is
+/// always passed as the first argument and `v2` as the second.
+///
/// # Examples
///
/// ```
@@ -1666,7 +1672,7 @@ pub fn max<T: Ord>(v1: T, v2: T) -> T {
#[must_use]
#[stable(feature = "cmp_min_max_by", since = "1.53.0")]
pub fn max_by<T, F: FnOnce(&T, &T) -> Ordering>(v1: T, v2: T, compare: F) -> T {
- if compare(&v2, &v1).is_lt() { v1 } else { v2 }
+ if compare(&v1, &v2).is_gt() { v1 } else { v2 }
}
/// Returns the element that gives the maximum value from the specified function.
@@ -1745,6 +1751,9 @@ where
///
/// Returns `[v1, v2]` if the comparison determines them to be equal.
///
+/// The parameter order is preserved when calling the `compare` function, i.e. `v1` is
+/// always passed as the first argument and `v2` as the second.
+///
/// # Examples
///
/// ```
@@ -1769,7 +1778,7 @@ pub fn minmax_by<T, F>(v1: T, v2: T, compare: F) -> [T; 2]
where
F: FnOnce(&T, &T) -> Ordering,
{
- if compare(&v2, &v1).is_lt() { [v2, v1] } else { [v1, v2] }
+ if compare(&v1, &v2).is_le() { [v1, v2] } else { [v2, v1] }
}
/// Returns minimum and maximum values with respect to the specified key function.
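The new doc sentences and the rewritten bodies pin down both argument order and tie-breaking; both are observable:

```rust
use std::cmp::Ordering;

fn main() {
    let by_key = |a: &(i32, &str), b: &(i32, &str)| -> Ordering { a.0.cmp(&b.0) };
    let v1 = (0, "v1");
    let v2 = (0, "v2");
    // On equal keys, min_by returns its first argument...
    assert_eq!(std::cmp::min_by(v1, v2, by_key).1, "v1");
    // ...and max_by returns its second.
    assert_eq!(std::cmp::max_by(v1, v2, by_key).1, "v2");
}
```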
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index d188f2a0fbe0e..cdeec0a12f466 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -267,53 +267,72 @@ pub unsafe fn atomic_fence<const ORD: AtomicOrdering>();
pub unsafe fn atomic_singlethreadfence<const ORD: AtomicOrdering>();
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
-/// if supported; otherwise, it is a no-op.
+/// for the given address if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
-/// The `locality` argument must be a constant integer and is a temporal locality specifier
-/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality,
+/// to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_intrinsic]
#[rustc_nounwind]
-pub unsafe fn prefetch_read_data<T>(data: *const T, locality: i32);
+#[miri::intrinsic_fallback_is_spec]
+pub const fn prefetch_read_data<T, const LOCALITY: i32>(data: *const T) {
+ // This operation is a no-op, unless it is overridden by the backend.
+ let _ = data;
+}
+
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
-/// if supported; otherwise, it is a no-op.
+/// for the given address if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
-/// The `locality` argument must be a constant integer and is a temporal locality specifier
-/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality,
+/// to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_intrinsic]
#[rustc_nounwind]
-pub unsafe fn prefetch_write_data<T>(data: *const T, locality: i32);
+#[miri::intrinsic_fallback_is_spec]
+pub const fn prefetch_write_data<T, const LOCALITY: i32>(data: *const T) {
+ // This operation is a no-op, unless it is overridden by the backend.
+ let _ = data;
+}
+
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
-/// if supported; otherwise, it is a no-op.
+/// for the given address if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
-/// The `locality` argument must be a constant integer and is a temporal locality specifier
-/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality,
+/// to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_intrinsic]
#[rustc_nounwind]
-pub unsafe fn prefetch_read_instruction<T>(data: *const T, locality: i32);
+#[miri::intrinsic_fallback_is_spec]
+pub const fn prefetch_read_instruction<T, const LOCALITY: i32>(data: *const T) {
+ // This operation is a no-op, unless it is overridden by the backend.
+ let _ = data;
+}
+
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
-/// if supported; otherwise, it is a no-op.
+/// for the given address if supported; otherwise, it is a no-op.
/// Prefetches have no effect on the behavior of the program but can change its performance
/// characteristics.
///
-/// The `locality` argument must be a constant integer and is a temporal locality specifier
-/// ranging from (0) - no locality, to (3) - extremely local keep in cache.
+/// The `LOCALITY` argument is a temporal locality specifier ranging from (0) - no locality,
+/// to (3) - extremely local, keep in cache.
///
/// This intrinsic does not have a stable counterpart.
#[rustc_intrinsic]
#[rustc_nounwind]
-pub unsafe fn prefetch_write_instruction<T>(data: *const T, locality: i32);
+#[miri::intrinsic_fallback_is_spec]
+pub const fn prefetch_write_instruction<T, const LOCALITY: i32>(data: *const T) {
+ // This operation is a no-op, unless it is overridden by the backend.
+ let _ = data;
+}
/// Executes a breakpoint trap, for inspection by a debugger.
///
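A sketch of how the reworked intrinsic might be used, assuming the const-generic `LOCALITY` signature shown in this hunk; `core::intrinsics` is permanently unstable, so this is illustration only:

```rust
#![feature(core_intrinsics)]
#![allow(internal_features)]
use core::intrinsics::prefetch_read_data;

fn sum(data: &[u64]) -> u64 {
    let mut total = 0;
    for i in 0..data.len() {
        if let Some(ahead) = data.get(i + 8) {
            // Hint maximal temporal locality (3) for an element we'll read soon.
            // Safe to call now: a wrong hint can only affect performance.
            prefetch_read_data::<u64, 3>(ahead);
        }
        total += data[i];
    }
    total
}

fn main() {
    assert_eq!(sum(&[1; 100]), 100);
}
```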
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index 0a057a5b3ca77..fc8c895f825ec 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -172,6 +172,7 @@
#![feature(no_core)]
#![feature(optimize_attribute)]
#![feature(prelude_import)]
+#![feature(reborrow)]
#![feature(repr_simd)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
diff --git a/library/core/src/marker.rs b/library/core/src/marker.rs
index ba00ee17b6528..8ad58599c6815 100644
--- a/library/core/src/marker.rs
+++ b/library/core/src/marker.rs
@@ -1365,3 +1365,11 @@ pub macro CoercePointee($item:item) {
pub trait CoercePointeeValidated {
/* compiler built-in */
}
+
+/// Allows a value to be reborrowed as exclusive, creating a copy of the value
+/// that disables the source for reads and writes for the lifetime of the copy.
+#[lang = "reborrow"]
+#[unstable(feature = "reborrow", issue = "145612")]
+pub trait Reborrow {
+ // Empty.
+}
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index b9ca18eef8227..0bea58c7b75e4 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -209,6 +209,48 @@ macro_rules! int_impl {
self & self.wrapping_neg()
}
+ /// Returns the index of the highest bit set to one in `self`, or `None`
+ /// if `self` is `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".highest_one(), None);")]
+ #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".highest_one(), Some(0));")]
+ #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".highest_one(), Some(4));")]
+ #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".highest_one(), Some(4));")]
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn highest_one(self) -> Option<u32> {
+ (self as $UnsignedT).highest_one()
+ }
+
+ /// Returns the index of the lowest bit set to one in `self`, or `None`
+ /// if `self` is `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".lowest_one(), None);")]
+ #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".lowest_one(), Some(0));")]
+ #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".lowest_one(), Some(4));")]
+ #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".lowest_one(), Some(0));")]
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn lowest_one(self) -> Option<u32> {
+ (self as $UnsignedT).lowest_one()
+ }
+
/// Returns the bit pattern of `self` reinterpreted as an unsigned integer of the same size.
///
/// This produces the same result as an `as` cast, but ensures that the bit-width remains
diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs
index 3574671e1f2a7..46a78a3a41137 100644
--- a/library/core/src/num/nonzero.rs
+++ b/library/core/src/num/nonzero.rs
@@ -706,6 +706,54 @@ macro_rules! nonzero_integer {
unsafe { NonZero::new_unchecked(n) }
}
+ /// Returns the index of the highest bit set to one in `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ /// # use core::num::NonZero;
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1)?.highest_one(), 0);")]
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x10)?.highest_one(), 4);")]
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1f)?.highest_one(), 4);")]
+ /// # Some(())
+ /// # }
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn highest_one(self) -> u32 {
+ Self::BITS - 1 - self.leading_zeros()
+ }
+
+ /// Returns the index of the lowest bit set to one in `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ /// # use core::num::NonZero;
+ /// # fn main() { test().unwrap(); }
+ /// # fn test() -> Option<()> {
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1)?.lowest_one(), 0);")]
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x10)?.lowest_one(), 4);")]
+ #[doc = concat!("assert_eq!(NonZero::<", stringify!($Int), ">::new(0x1f)?.lowest_one(), 0);")]
+ /// # Some(())
+ /// # }
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn lowest_one(self) -> u32 {
+ self.trailing_zeros()
+ }
+
/// Returns the number of ones in the binary representation of `self`.
///
/// # Examples
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 40ef62600086c..f60b7e8202d03 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -261,6 +261,54 @@ macro_rules! uint_impl {
self & self.wrapping_neg()
}
+ /// Returns the index of the highest bit set to one in `self`, or `None`
+ /// if `self` is `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".highest_one(), None);")]
+ #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".highest_one(), Some(0));")]
+ #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".highest_one(), Some(4));")]
+ #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".highest_one(), Some(4));")]
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn highest_one(self) -> Option<u32> {
+ match NonZero::new(self) {
+ Some(v) => Some(v.highest_one()),
+ None => None,
+ }
+ }
+
+ /// Returns the index of the lowest bit set to one in `self`, or `None`
+ /// if `self` is `0`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(int_lowest_highest_one)]
+ ///
+ #[doc = concat!("assert_eq!(0x0_", stringify!($SelfT), ".lowest_one(), None);")]
+ #[doc = concat!("assert_eq!(0x1_", stringify!($SelfT), ".lowest_one(), Some(0));")]
+ #[doc = concat!("assert_eq!(0x10_", stringify!($SelfT), ".lowest_one(), Some(4));")]
+ #[doc = concat!("assert_eq!(0x1f_", stringify!($SelfT), ".lowest_one(), Some(0));")]
+ /// ```
+ #[unstable(feature = "int_lowest_highest_one", issue = "145203")]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[inline(always)]
+ pub const fn lowest_one(self) -> Option<u32> {
+ match NonZero::new(self) {
+ Some(v) => Some(v.lowest_one()),
+ None => None,
+ }
+ }
+
/// Returns the bit pattern of `self` reinterpreted as a signed integer of the same size.
///
/// This produces the same result as an `as` cast, but ensures that the bit-width remains
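For intuition, the two new methods line up with existing bit queries: on a nonzero value, `highest_one` agrees with `checked_ilog2` and `lowest_one` with `trailing_zeros` (nightly `int_lowest_highest_one` feature):

```rust
#![feature(int_lowest_highest_one)]

fn main() {
    let x: u32 = 0b0101_0000;
    assert_eq!(x.highest_one(), Some(6));
    assert_eq!(x.lowest_one(), Some(4));
    // Consistency with existing queries on nonzero values.
    assert_eq!(x.highest_one(), x.checked_ilog2());
    assert_eq!(x.lowest_one().unwrap(), x.trailing_zeros());
    // Zero has no set bits.
    assert_eq!(0u32.highest_one(), None);
}
```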
diff --git a/library/core/src/panic/location.rs b/library/core/src/panic/location.rs
index 6ef7d5a22a30f..cafdcfa2c2e02 100644
--- a/library/core/src/panic/location.rs
+++ b/library/core/src/panic/location.rs
@@ -183,7 +183,7 @@ impl<'a> Location<'a> {
#[must_use]
#[stable(feature = "panic_hooks", since = "1.10.0")]
#[rustc_const_stable(feature = "const_location_fields", since = "1.79.0")]
- pub const fn file(&self) -> &str {
+ pub const fn file(&self) -> &'a str {
// SAFETY: The filename is valid.
unsafe { self.filename.as_ref() }
}
@@ -195,7 +195,7 @@ impl<'a> Location<'a> {
#[must_use]
#[unstable(feature = "file_with_nul", issue = "141727")]
#[inline]
- pub const fn file_with_nul(&self) -> &CStr {
+ pub const fn file_with_nul(&self) -> &'a CStr {
let filename = self.filename.as_ptr();
// SAFETY: The filename is valid for `filename_len+1` bytes, so this addition can't
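The point of returning `&'a str` rather than a borrow tied to `&self`: the result can outlive the reference to the `Location` it was read through. A sketch:

```rust
use std::panic::Location;

// Only compiles with the `&'a str` return type: the result borrows from the
// 'a data behind the Location, not from the short-lived `&loc` borrow.
fn file_of<'a>(loc: &Location<'a>) -> &'a str {
    loc.file()
}

#[track_caller]
fn here() -> &'static str {
    file_of(Location::caller())
}

fn main() {
    println!("called from {}", here());
}
```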
diff --git a/library/core/src/pin/unsafe_pinned.rs b/library/core/src/pin/unsafe_pinned.rs
index b18b5d7c9ec0d..ede6e0d6106bd 100644
--- a/library/core/src/pin/unsafe_pinned.rs
+++ b/library/core/src/pin/unsafe_pinned.rs
@@ -120,8 +120,8 @@ impl<T: ?Sized> UnsafePinned<T> {
#[inline(always)]
#[must_use]
#[unstable(feature = "unsafe_pinned", issue = "125735")]
- pub const fn raw_get(this: *const Self) -> *const T {
- this as *const T
+ pub const fn raw_get(this: *const Self) -> *mut T {
+ this as *const T as *mut T
}
/// Gets a mutable pointer to the wrapped value.
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 75ec4b7c76954..98a27012b9ad3 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -916,6 +916,7 @@ pub const fn dangling<T>() -> *const T {
#[must_use]
#[stable(feature = "strict_provenance", since = "1.84.0")]
#[rustc_const_stable(feature = "strict_provenance", since = "1.84.0")]
+#[allow(integer_to_ptr_transmutes)] // Expected semantics here.
pub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
// An int-to-pointer transmute currently has exactly the intended semantics: it creates a
// pointer without provenance. Note that this is *not* a stable guarantee about transmute
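For context, the lint is allowed here precisely because an int-to-pointer transmute matches this function's documented semantics: an address with no provenance. Usage is unchanged:

```rust
use std::ptr;

fn main() {
    // A fixed address with no provenance: fine to create and inspect,
    // undefined behavior to dereference.
    let p: *mut u8 = ptr::without_provenance_mut(0x1000);
    assert_eq!(p.addr(), 0x1000);
    assert!(!p.is_null());
}
```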
diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs
index ae360df80f60b..98091e9fe83fb 100644
--- a/library/core/src/slice/index.rs
+++ b/library/core/src/slice/index.rs
@@ -34,53 +34,44 @@ where
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
-const fn slice_start_index_len_fail(index: usize, len: usize) -> ! {
- const_panic!(
- "slice start index is out of range for slice",
- "range start index {index} out of range for slice of length {len}",
- index: usize,
- len: usize,
- )
-}
+const fn slice_index_fail(start: usize, end: usize, len: usize) -> ! {
+ if start > len {
+ const_panic!(
+ "slice start index is out of range for slice",
+ "range start index {start} out of range for slice of length {len}",
+ start: usize,
+ len: usize,
+ )
+ }
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
-#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[track_caller]
-const fn slice_end_index_len_fail(index: usize, len: usize) -> ! {
- const_panic!(
- "slice end index is out of range for slice",
- "range end index {index} out of range for slice of length {len}",
- index: usize,
- len: usize,
- )
-}
+ if end > len {
+ const_panic!(
+ "slice end index is out of range for slice",
+ "range end index {end} out of range for slice of length {len}",
+ end: usize,
+ len: usize,
+ )
+ }
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
-#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[track_caller]
-const fn slice_index_order_fail(index: usize, end: usize) -> ! {
+ if start > end {
+ const_panic!(
+ "slice index start is larger than end",
+ "slice index starts at {start} but ends at {end}",
+ start: usize,
+ end: usize,
+ )
+ }
+
+ // Only reachable if the range was a `RangeInclusive` or a
+ // `RangeToInclusive`, with `end == len`.
const_panic!(
- "slice index start is larger than end",
- "slice index starts at {index} but ends at {end}",
- index: usize,
+ "slice end index is out of range for slice",
+ "range end index {end} out of range for slice of length {len}",
end: usize,
+ len: usize,
)
}
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
-#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[track_caller]
-const fn slice_start_index_overflow_fail() -> ! {
- panic!("attempted to index slice from after maximum usize");
-}
-
-#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never), cold)]
-#[cfg_attr(feature = "panic_immediate_abort", inline)]
-#[track_caller]
-const fn slice_end_index_overflow_fail() -> ! {
- panic!("attempted to index slice up to maximum usize");
-}
-
// The UbChecks are great for catching bugs in the unsafe methods, but including
// them in safe indexing is unnecessary and hurts inlining and debug runtime perf.
// Both the safe and unsafe public methods share these helpers,
@@ -341,7 +332,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start(), self.len()) }
} else {
- slice_end_index_len_fail(self.end(), slice.len())
+ slice_index_fail(self.start(), self.end(), slice.len())
}
}
@@ -351,7 +342,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::IndexRange {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
} else {
- slice_end_index_len_fail(self.end(), slice.len())
+ slice_index_fail(self.start(), self.end(), slice.len())
}
}
}
@@ -436,26 +427,27 @@ unsafe impl<T> const SliceIndex<[T]> for ops::Range<usize> {
#[inline(always)]
fn index(self, slice: &[T]) -> &[T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
- let Some(new_len) = usize::checked_sub(self.end, self.start) else {
- slice_index_order_fail(self.start, self.end)
- };
- if self.end > slice.len() {
- slice_end_index_len_fail(self.end, slice.len());
+ if let Some(new_len) = usize::checked_sub(self.end, self.start)
+ && self.end <= slice.len()
+ {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) }
+ } else {
+ slice_index_fail(self.start, self.end, slice.len())
}
- // SAFETY: `self` is checked to be valid and in bounds above.
- unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) }
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
- let Some(new_len) = usize::checked_sub(self.end, self.start) else {
- slice_index_order_fail(self.start, self.end)
- };
- if self.end > slice.len() {
- slice_end_index_len_fail(self.end, slice.len());
+ // Using checked_sub is a safe way to get `SubUnchecked` in MIR
+ if let Some(new_len) = usize::checked_sub(self.end, self.start)
+ && self.end <= slice.len()
+ {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) }
+ } else {
+ slice_index_fail(self.start, self.end, slice.len())
}
- // SAFETY: `self` is checked to be valid and in bounds above.
- unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) }
}
}
@@ -567,7 +559,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeFrom<usize> {
#[inline]
fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
- slice_start_index_len_fail(self.start, slice.len());
+ slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*self.get_unchecked(slice) }
@@ -576,7 +568,7 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeFrom<usize> {
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
- slice_start_index_len_fail(self.start, slice.len());
+ slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *self.get_unchecked_mut(slice) }
@@ -690,18 +682,32 @@ unsafe impl<T> const SliceIndex<[T]> for ops::RangeInclusive<usize> {
#[inline]
fn index(self, slice: &[T]) -> &[T] {
- if *self.end() == usize::MAX {
- slice_end_index_overflow_fail();
+ let Self { mut start, mut end, exhausted } = self;
+ let len = slice.len();
+ if end < len {
+ end = end + 1;
+ start = if exhausted { end } else { start };
+ if let Some(new_len) = usize::checked_sub(end, start) {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { return &*get_offset_len_noubcheck(slice, start, new_len) }
+ }
}
- self.into_slice_range().index(slice)
+ slice_index_fail(start, end, slice.len())
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
- if *self.end() == usize::MAX {
- slice_end_index_overflow_fail();
+ let Self { mut start, mut end, exhausted } = self;
+ let len = slice.len();
+ if end < len {
+ end = end + 1;
+ start = if exhausted { end } else { start };
+ if let Some(new_len) = usize::checked_sub(end, start) {
+ // SAFETY: `self` is checked to be valid and in bounds above.
+ unsafe { return &mut *get_offset_len_mut_noubcheck(slice, start, new_len) }
+ }
}
- self.into_slice_range().index_mut(slice)
+ slice_index_fail(start, end, slice.len())
}
}
@@ -852,28 +858,26 @@ where
R: ops::RangeBounds<usize>,
let len = bounds.end;
- let start = match range.start_bound() {
- ops::Bound::Included(&start) => start,
- ops::Bound::Excluded(start) => {
- start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
- }
- ops::Bound::Unbounded => 0,
- };
-
let end = match range.end_bound() {
- ops::Bound::Included(end) => {
- end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
- }
+ ops::Bound::Included(&end) if end >= len => slice_index_fail(0, end, len),
+ // Cannot overflow because `end < len` implies `end < usize::MAX`.
+ ops::Bound::Included(&end) => end + 1,
+
+ ops::Bound::Excluded(&end) if end > len => slice_index_fail(0, end, len),
ops::Bound::Excluded(&end) => end,
ops::Bound::Unbounded => len,
};
- if start > end {
- slice_index_order_fail(start, end);
- }
- if end > len {
- slice_end_index_len_fail(end, len);
- }
+ let start = match range.start_bound() {
+ ops::Bound::Excluded(&start) if start >= end => slice_index_fail(start, end, len),
+ // Cannot overflow because `start < end` implies `start < usize::MAX`.
+ ops::Bound::Excluded(&start) => start + 1,
+
+ ops::Bound::Included(&start) if start > end => slice_index_fail(start, end, len),
+ ops::Bound::Included(&start) => start,
+
+ ops::Bound::Unbounded => 0,
+ };
ops::Range { start, end }
}
@@ -982,25 +986,27 @@ pub(crate) fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
- use ops::Bound;
- let start = match start {
- Bound::Included(start) => start,
- Bound::Excluded(start) => {
- start.checked_add(1).unwrap_or_else(|| slice_start_index_overflow_fail())
- }
- Bound::Unbounded => 0,
- };
-
let end = match end {
- Bound::Included(end) => {
- end.checked_add(1).unwrap_or_else(|| slice_end_index_overflow_fail())
- }
- Bound::Excluded(end) => end,
- Bound::Unbounded => len,
+ ops::Bound::Included(end) if end >= len => slice_index_fail(0, end, len),
+ // Cannot overflow because `end < len` implies `end < usize::MAX`.
+ ops::Bound::Included(end) => end + 1,
+
+ ops::Bound::Excluded(end) if end > len => slice_index_fail(0, end, len),
+ ops::Bound::Excluded(end) => end,
+
+ ops::Bound::Unbounded => len,
};
- // Don't bother with checking `start < end` and `end <= len`
- // since these checks are handled by `Range` impls
+ let start = match start {
+ ops::Bound::Excluded(start) if start >= end => slice_index_fail(start, end, len),
+ // Cannot overflow because `start < end` implies `start < usize::MAX`.
+ ops::Bound::Excluded(start) => start + 1,
+
+ ops::Bound::Included(start) if start > end => slice_index_fail(start, end, len),
+ ops::Bound::Included(start) => start,
+
+ ops::Bound::Unbounded => 0,
+ };
start..end
}
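The refactor folds the four cold panic helpers into one `slice_index_fail` that dispatches on the failure mode; the formatted messages keep the shapes shown above. A quick check of three reachable cases (message fragments taken from this hunk):

```rust
use std::panic::{self, AssertUnwindSafe};

fn panic_message(f: impl FnOnce()) -> String {
    let err = panic::catch_unwind(AssertUnwindSafe(f)).unwrap_err();
    err.downcast_ref::<String>().cloned().unwrap_or_default()
}

fn main() {
    let v = [0u8; 3];
    assert!(panic_message(|| { let _ = &v[1..5]; })
        .contains("range end index 5 out of range for slice of length 3"));
    assert!(panic_message(|| { let _ = &v[2..1]; })
        .contains("slice index starts at 2 but ends at 1"));
    assert!(panic_message(|| { let _ = &v[..=usize::MAX]; })
        .contains("out of range"));
}
```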
diff --git a/library/core/src/task/poll.rs b/library/core/src/task/poll.rs
index ca668361ef63b..59ffe7ad49c05 100644
--- a/library/core/src/task/poll.rs
+++ b/library/core/src/task/poll.rs
@@ -125,7 +125,7 @@ impl<T, E> Poll<Result<T, E>> {
}
}
- /// Maps a `Poll::Ready<Result<T, E>>` to `Poll::Ready<Result<T, F>>` by
+ /// Maps a `Poll::Ready<Result<T, E>>` to `Poll::Ready<Result<T, U>>` by
/// applying a function to a contained `Poll::Ready(Err)` value, leaving all other
/// variants untouched.
///
diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs
index b128acfc00083..d2281b1df2ffc 100644
--- a/library/coretests/tests/lib.rs
+++ b/library/coretests/tests/lib.rs
@@ -54,6 +54,7 @@
#![feature(generic_assert_internals)]
#![feature(hasher_prefixfree_extras)]
#![feature(hashmap_internals)]
+#![feature(int_lowest_highest_one)]
#![feature(int_roundings)]
#![feature(ip)]
#![feature(is_ascii_octdigit)]
diff --git a/library/coretests/tests/nonzero.rs b/library/coretests/tests/nonzero.rs
index eb06c34fd0205..69e4ed9c36b3a 100644
--- a/library/coretests/tests/nonzero.rs
+++ b/library/coretests/tests/nonzero.rs
@@ -462,3 +462,111 @@ fn test_nonzero_fmt() {
assert_eq!(i, nz);
}
+
+#[test]
+fn test_nonzero_highest_one() {
+ macro_rules! nonzero_int_impl {
+ ($($T:ty),+) => {
+ $(
+ {
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!(NonZero::<$T>::new(1 << i).unwrap().highest_one(), i);
+ if i != <$T>::BITS - 1 {
+ // Set lowest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX >> i).unwrap().highest_one(),
+ <$T>::BITS - i - 2,
+ );
+ }
+ // Set highest bits.
+ assert_eq!(
+ NonZero::<$T>::new(-1 << i).unwrap().highest_one(),
+ <$T>::BITS - 1,
+ );
+ }
+ }
+ )+
+ };
+ }
+
+ macro_rules! nonzero_uint_impl {
+ ($($T:ty),+) => {
+ $(
+ {
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!(NonZero::<$T>::new(1 << i).unwrap().highest_one(), i);
+ // Set lowest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX >> i).unwrap().highest_one(),
+ <$T>::BITS - i - 1,
+ );
+ // Set highest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX << i).unwrap().highest_one(),
+ <$T>::BITS - 1,
+ );
+ }
+ }
+ )+
+ };
+ }
+
+ nonzero_int_impl!(i8, i16, i32, i64, i128, isize);
+ nonzero_uint_impl!(u8, u16, u32, u64, u128, usize);
+}
+
+#[test]
+fn test_nonzero_lowest_one() {
+ macro_rules! nonzero_int_impl {
+ ($($T:ty),+) => {
+ $(
+ {
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!(NonZero::<$T>::new(1 << i).unwrap().lowest_one(), i);
+ if i != <$T>::BITS - 1 {
+ // Set lowest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX >> i).unwrap().lowest_one(),
+ 0,
+ );
+ }
+ // Set highest bits.
+ assert_eq!(
+ NonZero::<$T>::new(-1 << i).unwrap().lowest_one(),
+ i,
+ );
+ }
+ }
+ )+
+ };
+ }
+
+ macro_rules! nonzero_uint_impl {
+ ($($T:ty),+) => {
+ $(
+ {
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!(NonZero::<$T>::new(1 << i).unwrap().lowest_one(), i);
+ // Set lowest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX >> i).unwrap().lowest_one(),
+ 0,
+ );
+ // Set highest bits.
+ assert_eq!(
+ NonZero::<$T>::new(<$T>::MAX << i).unwrap().lowest_one(),
+ i,
+ );
+ }
+ }
+ )+
+ };
+ }
+
+ nonzero_int_impl!(i8, i16, i32, i64, i128, isize);
+ nonzero_uint_impl!(u8, u16, u32, u64, u128, usize);
+}
diff --git a/library/coretests/tests/num/int_macros.rs b/library/coretests/tests/num/int_macros.rs
index ca32fce861f8c..1611a6466f5ab 100644
--- a/library/coretests/tests/num/int_macros.rs
+++ b/library/coretests/tests/num/int_macros.rs
@@ -227,6 +227,46 @@ macro_rules! int_module {
}
}
+ #[test]
+ fn test_highest_one() {
+ const ZERO: $T = 0;
+ const ONE: $T = 1;
+ const MINUS_ONE: $T = -1;
+
+ assert_eq!(ZERO.highest_one(), None);
+
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!((ONE << i).highest_one(), Some(i));
+ if i != <$T>::BITS - 1 {
+ // Set lowest bits.
+ assert_eq!((<$T>::MAX >> i).highest_one(), Some(<$T>::BITS - i - 2));
+ }
+ // Set highest bits.
+ assert_eq!((MINUS_ONE << i).highest_one(), Some(<$T>::BITS - 1));
+ }
+ }
+
+ #[test]
+ fn test_lowest_one() {
+ const ZERO: $T = 0;
+ const ONE: $T = 1;
+ const MINUS_ONE: $T = -1;
+
+ assert_eq!(ZERO.lowest_one(), None);
+
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!((ONE << i).lowest_one(), Some(i));
+ if i != <$T>::BITS - 1 {
+ // Set lowest bits.
+ assert_eq!((<$T>::MAX >> i).lowest_one(), Some(0));
+ }
+ // Set highest bits.
+ assert_eq!((MINUS_ONE << i).lowest_one(), Some(i));
+ }
+ }
+
#[test]
fn test_from_str() {
fn from_str<T: core::str::FromStr>(t: &str) -> Option<T> {
diff --git a/library/coretests/tests/num/uint_macros.rs b/library/coretests/tests/num/uint_macros.rs
index 8f389de70aa2e..c7d10ea4d880a 100644
--- a/library/coretests/tests/num/uint_macros.rs
+++ b/library/coretests/tests/num/uint_macros.rs
@@ -184,6 +184,40 @@ macro_rules! uint_module {
}
}
+ #[test]
+ fn test_highest_one() {
+ const ZERO: $T = 0;
+ const ONE: $T = 1;
+
+ assert_eq!(ZERO.highest_one(), None);
+
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!((ONE << i).highest_one(), Some(i));
+ // Set lowest bits.
+ assert_eq!((<$T>::MAX >> i).highest_one(), Some(<$T>::BITS - i - 1));
+ // Set highest bits.
+ assert_eq!((<$T>::MAX << i).highest_one(), Some(<$T>::BITS - 1));
+ }
+ }
+
+ #[test]
+ fn test_lowest_one() {
+ const ZERO: $T = 0;
+ const ONE: $T = 1;
+
+ assert_eq!(ZERO.lowest_one(), None);
+
+ for i in 0..<$T>::BITS {
+ // Set single bit.
+ assert_eq!((ONE << i).lowest_one(), Some(i));
+ // Set lowest bits.
+ assert_eq!((<$T>::MAX >> i).lowest_one(), Some(0));
+ // Set highest bits.
+ assert_eq!((<$T>::MAX << i).lowest_one(), Some(i));
+ }
+ }
+
fn from_str<T: core::str::FromStr>(t: &str) -> Option<T> {
core::str::FromStr::from_str(t).ok()
}
diff --git a/library/coretests/tests/panic/location.rs b/library/coretests/tests/panic/location.rs
index 910001bcc1c58..a7db05a15c68f 100644
--- a/library/coretests/tests/panic/location.rs
+++ b/library/coretests/tests/panic/location.rs
@@ -47,11 +47,19 @@ fn location_const_column() {
assert_eq!(COLUMN, 40);
}
+#[test]
+fn location_file_lifetime<'x>() {
+ // Verify that the returned `&str`s lifetime is derived from the generic
+ // lifetime 'a, not the lifetime of `&self`, when calling `Location::file`.
+ // Test failure is indicated by a compile failure, not a runtime panic.
+ let _: for<'a> fn(&'a Location<'x>) -> &'x str = Location::file;
+}
+
#[test]
fn location_debug() {
let f = format!("{:?}", Location::caller());
assert!(f.contains(&format!("{:?}", file!())));
- assert!(f.contains("52"));
+ assert!(f.contains("60"));
assert!(f.contains("29"));
}
diff --git a/library/coretests/tests/slice.rs b/library/coretests/tests/slice.rs
index 992f24cb18f20..110c4e5f3b406 100644
--- a/library/coretests/tests/slice.rs
+++ b/library/coretests/tests/slice.rs
@@ -1492,28 +1492,28 @@ mod slice_index {
// note: using 0 specifically ensures that the result of overflowing is 0..0,
// so that `get` doesn't simply return None for the wrong reason.
bad: data[0 ..= usize::MAX];
- message: "maximum usize";
+ message: "out of range";
}
in mod rangetoinclusive_overflow {
data: [0, 1];
bad: data[..= usize::MAX];
- message: "maximum usize";
+ message: "out of range";
}
in mod boundpair_overflow_end {
data: [0; 1];
bad: data[(Bound::Unbounded, Bound::Included(usize::MAX))];
- message: "maximum usize";
+ message: "out of range";
}
in mod boundpair_overflow_start {
data: [0; 1];
bad: data[(Bound::Excluded(usize::MAX), Bound::Unbounded)];
- message: "maximum usize";
+ message: "out of range";
}
} // panic_cases!
}
@@ -2008,7 +2008,7 @@ fn test_copy_within_panics_src_inverted() {
bytes.copy_within(2..1, 0);
}
#[test]
-#[should_panic(expected = "attempted to index slice up to maximum usize")]
+#[should_panic(expected = "out of range")]
fn test_copy_within_panics_src_out_of_bounds() {
let mut bytes = *b"Hello, World!";
// an inclusive range ending at usize::MAX would make src_end overflow
diff --git a/library/panic_unwind/Cargo.toml b/library/panic_unwind/Cargo.toml
index 13d1a7160da8d..67fc919c42c2b 100644
--- a/library/panic_unwind/Cargo.toml
+++ b/library/panic_unwind/Cargo.toml
@@ -13,7 +13,6 @@ doc = false
[dependencies]
alloc = { path = "../alloc" }
-cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] }
core = { path = "../rustc-std-workspace-core", package = "rustc-std-workspace-core" }
unwind = { path = "../unwind" }
diff --git a/library/panic_unwind/src/lib.rs b/library/panic_unwind/src/lib.rs
index 50bd933aca204..83311f3238012 100644
--- a/library/panic_unwind/src/lib.rs
+++ b/library/panic_unwind/src/lib.rs
@@ -15,6 +15,7 @@
#![unstable(feature = "panic_unwind", issue = "32837")]
#![doc(issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")]
#![feature(cfg_emscripten_wasm_eh)]
+#![feature(cfg_select)]
#![feature(core_intrinsics)]
#![feature(lang_items)]
#![feature(panic_unwind)]
@@ -33,18 +34,21 @@ use alloc::boxed::Box;
use core::any::Any;
use core::panic::PanicPayload;
-cfg_if::cfg_if! {
- if #[cfg(all(target_os = "emscripten", not(emscripten_wasm_eh)))] {
+cfg_select! {
+ all(target_os = "emscripten", not(emscripten_wasm_eh)) => {
#[path = "emcc.rs"]
mod imp;
- } else if #[cfg(target_os = "hermit")] {
+ }
+ target_os = "hermit" => {
#[path = "hermit.rs"]
mod imp;
- } else if #[cfg(target_os = "l4re")] {
+ }
+ target_os = "l4re" => {
// L4Re is unix family but does not yet support unwinding.
#[path = "dummy.rs"]
mod imp;
- } else if #[cfg(any(
+ }
+ any(
all(target_family = "windows", target_env = "gnu"),
target_os = "psp",
target_os = "xous",
@@ -52,19 +56,22 @@ cfg_if::cfg_if! {
all(target_family = "unix", not(any(target_os = "espidf", target_os = "nuttx"))),
all(target_vendor = "fortanix", target_env = "sgx"),
target_family = "wasm",
- ))] {
+ ) => {
#[path = "gcc.rs"]
mod imp;
- } else if #[cfg(miri)] {
+ }
+ miri => {
// Use the Miri runtime on Windows as miri doesn't support funclet based unwinding,
// only landingpad based unwinding. Also use the Miri runtime on unsupported platforms.
#[path = "miri.rs"]
mod imp;
- } else if #[cfg(all(target_env = "msvc", not(target_arch = "arm")))] {
+ }
+ all(target_env = "msvc", not(target_arch = "arm")) => {
// LLVM does not support unwinding on 32 bit ARM msvc (thumbv7a-pc-windows-msvc)
#[path = "seh.rs"]
mod imp;
- } else {
+ }
+ _ => {
// Targets that don't support unwinding.
// - os=none ("bare metal" targets)
// - os=uefi
diff --git a/library/panic_unwind/src/seh.rs b/library/panic_unwind/src/seh.rs
index 668e988abff39..a5d67dbb6a9f4 100644
--- a/library/panic_unwind/src/seh.rs
+++ b/library/panic_unwind/src/seh.rs
@@ -289,10 +289,11 @@ macro_rules! define_cleanup {
}
}
}
-cfg_if::cfg_if! {
- if #[cfg(target_arch = "x86")] {
+cfg_select! {
+ target_arch = "x86" => {
define_cleanup!("thiscall" "thiscall-unwind");
- } else {
+ }
+ _ => {
define_cleanup!("C" "C-unwind");
}
}
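`cfg_select!` is the built-in replacement for the external `cfg-if` crate dropped above (nightly `cfg_select` feature): arms are tried top to bottom and only the first matching one is compiled. A standalone sketch:

```rust
#![feature(cfg_select)]

// First matching arm wins; the others are discarded, as with cfg_if.
cfg_select! {
    target_family = "unix" => {
        fn platform() -> &'static str { "unix" }
    }
    target_family = "windows" => {
        fn platform() -> &'static str { "windows" }
    }
    _ => {
        fn platform() -> &'static str { "other" }
    }
}

fn main() {
    println!("running on {}", platform());
}
```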
diff --git a/library/std/src/collections/mod.rs b/library/std/src/collections/mod.rs
index 889ed3c538035..6104a02c739b5 100644
--- a/library/std/src/collections/mod.rs
+++ b/library/std/src/collections/mod.rs
@@ -26,7 +26,7 @@
//! should be considered. Detailed discussions of strengths and weaknesses of
//! individual collections can be found on their own documentation pages.
//!
-//! ### Use a `Vec` when:
+//! ### Use a [`Vec`] when:
//! * You want to collect items up to be processed or sent elsewhere later, and
//! don't care about any properties of the actual values being stored.
//! * You want a sequence of elements in a particular order, and will only be
@@ -35,25 +35,25 @@
//! * You want a resizable array.
//! * You want a heap-allocated array.
//!
-//! ### Use a `VecDeque` when:
+//! ### Use a [`VecDeque`] when:
//! * You want a [`Vec`] that supports efficient insertion at both ends of the
//! sequence.
//! * You want a queue.
//! * You want a double-ended queue (deque).
//!
-//! ### Use a `LinkedList` when:
+//! ### Use a [`LinkedList`] when:
//! * You want a [`Vec`] or [`VecDeque`] of unknown size, and can't tolerate
//! amortization.
//! * You want to efficiently split and append lists.
//! * You are *absolutely* certain you *really*, *truly*, want a doubly linked
//! list.
//!
-//! ### Use a `HashMap` when:
+//! ### Use a [`HashMap`] when:
//! * You want to associate arbitrary keys with an arbitrary value.
//! * You want a cache.
//! * You want a map, with no extra functionality.
//!
-//! ### Use a `BTreeMap` when:
+//! ### Use a [`BTreeMap`] when:
//! * You want a map sorted by its keys.
//! * You want to be able to get a range of entries on-demand.
//! * You're interested in what the smallest or largest key-value pair is.
@@ -65,7 +65,7 @@
//! * There is no meaningful value to associate with your keys.
//! * You just want a set.
//!
-//! ### Use a `BinaryHeap` when:
+//! ### Use a [`BinaryHeap`] when:
//!
//! * You want to store a bunch of elements, but only ever want to process the
//! "biggest" or "most important" one at any given time.
diff --git a/library/std/src/io/buffered/bufreader/buffer.rs b/library/std/src/io/buffered/bufreader/buffer.rs
index 574288e579e0b..9b600cd55758b 100644
--- a/library/std/src/io/buffered/bufreader/buffer.rs
+++ b/library/std/src/io/buffered/bufreader/buffer.rs
@@ -122,7 +122,7 @@ impl Buffer {
/// Remove bytes that have already been read from the buffer.
pub fn backshift(&mut self) {
- self.buf.copy_within(self.pos.., 0);
+ self.buf.copy_within(self.pos..self.filled, 0);
self.filled -= self.pos;
self.pos = 0;
}
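The fix matters because `buf` may hold stale bytes past `filled`; only the initialized region `pos..filled` should move to the front. A minimal model of the corrected behavior:

```rust
// Minimal model: shift only the initialized region [pos, filled) to index 0.
fn backshift(buf: &mut [u8], pos: usize, filled: usize) -> usize {
    buf.copy_within(pos..filled, 0);
    filled - pos
}

fn main() {
    let mut buf = [b'a', b'b', b'c', 0xAA, 0xAA]; // 0xAA marks stale bytes
    let filled = backshift(&mut buf, 1, 3);
    assert_eq!(&buf[..filled], b"bc");
}
```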
diff --git a/library/std/src/io/error.rs b/library/std/src/io/error.rs
index 562fdbf4ff76d..dcfa189823f89 100644
--- a/library/std/src/io/error.rs
+++ b/library/std/src/io/error.rs
@@ -18,7 +18,7 @@ use crate::{error, fmt, result, sys};
/// This type is broadly used across [`std::io`] for any operation which may
/// produce an error.
///
-/// This typedef is generally used to avoid writing out [`io::Error`] directly and
+/// This type alias is generally used to avoid writing out [`io::Error`] directly and
/// is otherwise a direct mapping to [`Result`].
///
/// While usual Rust style is to import types directly, aliases of [`Result`]
diff --git a/library/std/src/io/mod.rs b/library/std/src/io/mod.rs
index d351ee5e739d3..ff0e29e04c251 100644
--- a/library/std/src/io/mod.rs
+++ b/library/std/src/io/mod.rs
@@ -2461,7 +2461,7 @@ pub trait BufRead: Read {
/// delimiter or EOF is found.
///
/// If successful, this function will return the total number of bytes read,
- /// including the delimiter byte.
+ /// including the delimiter byte if found.
///
/// This is useful for efficiently skipping data such as NUL-terminated strings
/// in binary file formats without buffering.
@@ -2489,7 +2489,7 @@ pub trait BufRead: Read {
/// ```
/// use std::io::{self, BufRead};
///
- /// let mut cursor = io::Cursor::new(b"Ferris\0Likes long walks on the beach\0Crustacean\0");
+ /// let mut cursor = io::Cursor::new(b"Ferris\0Likes long walks on the beach\0Crustacean\0!");
///
/// // read name
/// let mut name = Vec::new();
@@ -2509,6 +2509,11 @@ pub trait BufRead: Read {
/// .expect("reading from cursor won't fail");
/// assert_eq!(num_bytes, 11);
/// assert_eq!(animal, b"Crustacean\0");
+ ///
+ /// // reach EOF
+ /// let num_bytes = cursor.skip_until(b'\0')
+ /// .expect("reading from cursor won't fail");
+ /// assert_eq!(num_bytes, 1);
/// ```
#[stable(feature = "bufread_skip_until", since = "1.83.0")]
fn skip_until(&mut self, byte: u8) -> Result<usize> {
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index 228bb39e59b67..0cc59aa89cc82 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -15,7 +15,7 @@
//!
//! If you already know the name of what you are looking for, the fastest way to
//! find it is to use the search
-//! bar at the top of the page.
+//! button at the top of the page.
//!
//! Otherwise, you may want to jump to one of these useful sections:
//!
diff --git a/library/std/src/sync/lazy_lock.rs b/library/std/src/sync/lazy_lock.rs
index a40e29a772a9c..3231125f7a13a 100644
--- a/library/std/src/sync/lazy_lock.rs
+++ b/library/std/src/sync/lazy_lock.rs
@@ -244,7 +244,11 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
#[inline]
#[stable(feature = "lazy_cell", since = "1.80.0")]
pub fn force(this: &LazyLock<T, F>) -> &T {
- this.once.call_once(|| {
+ this.once.call_once_force(|state| {
+ if state.is_poisoned() {
+ panic_poisoned();
+ }
+
// SAFETY: `call_once` only runs this closure once, ever.
let data = unsafe { &mut *this.data.get() };
let f = unsafe { ManuallyDrop::take(&mut data.f) };
@@ -257,8 +261,7 @@ impl<T, F: FnOnce() -> T> LazyLock<T, F> {
// * the closure was called and initialized `value`.
// * the closure was called and panicked, so this point is never reached.
// * the closure was not called, but a previous call initialized `value`.
- // * the closure was not called because the Once is poisoned, so this point
- // is never reached.
+ // * the closure was not called because the Once is poisoned, which we handled above.
// So `value` has definitely been initialized and will not be modified again.
unsafe { &*(*this.data.get()).value }
}
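With `call_once_force`, a `LazyLock` whose initializer panicked reports poisoning explicitly on later accesses (via `panic_poisoned`) instead of relying on `Once::call_once`'s generic poison panic. The observable behavior, sketched:

```rust
use std::panic::{self, AssertUnwindSafe};
use std::sync::LazyLock;

static CELL: LazyLock<u32> = LazyLock::new(|| panic!("init failed"));

fn main() {
    // First access panics inside the initializer, poisoning the cell.
    assert!(panic::catch_unwind(AssertUnwindSafe(|| *CELL)).is_err());
    // Later accesses also panic, now through the explicit poison check.
    assert!(panic::catch_unwind(AssertUnwindSafe(|| *CELL)).is_err());
}
```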
diff --git a/library/std/src/sync/nonpoison.rs b/library/std/src/sync/nonpoison.rs
index 2bbf226dc2cde..b3ae376e70d55 100644
--- a/library/std/src/sync/nonpoison.rs
+++ b/library/std/src/sync/nonpoison.rs
@@ -33,5 +33,10 @@ impl fmt::Display for WouldBlock {
pub use self::mutex::MappedMutexGuard;
#[unstable(feature = "nonpoison_mutex", issue = "134645")]
pub use self::mutex::{Mutex, MutexGuard};
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+pub use self::rwlock::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+pub use self::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
mod mutex;
+mod rwlock;
diff --git a/library/std/src/sync/nonpoison/rwlock.rs b/library/std/src/sync/nonpoison/rwlock.rs
new file mode 100644
index 0000000000000..eb0aef99cc1e7
--- /dev/null
+++ b/library/std/src/sync/nonpoison/rwlock.rs
@@ -0,0 +1,1081 @@
+use crate::cell::UnsafeCell;
+use crate::fmt;
+use crate::marker::PhantomData;
+use crate::mem::{self, ManuallyDrop, forget};
+use crate::ops::{Deref, DerefMut};
+use crate::ptr::NonNull;
+use crate::sync::nonpoison::{TryLockResult, WouldBlock};
+use crate::sys::sync as sys;
+
+/// A reader-writer lock that does not keep track of lock poisoning.
+///
+/// For more information about reader-writer locks, check out the documentation for the poisoning
+/// variant of this lock (which can be found at [`poison::RwLock`]).
+///
+/// [`poison::RwLock`]: crate::sync::poison::RwLock
+///
+/// # Examples
+///
+/// ```
+/// #![feature(nonpoison_rwlock)]
+///
+/// use std::sync::nonpoison::RwLock;
+///
+/// let lock = RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+/// let r1 = lock.read();
+/// let r2 = lock.read();
+/// assert_eq!(*r1, 5);
+/// assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+/// let mut w = lock.write();
+/// *w += 1;
+/// assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLock")]
+pub struct RwLock<T: ?Sized> {
+ /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data.
+ inner: sys::RwLock,
+ /// The lock-protected data.
+ data: UnsafeCell<T>,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Guards
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] and [`try_read`] methods on
+/// [`RwLock`].
+///
+/// [`read`]: RwLock::read
+/// [`try_read`]: RwLock::try_read
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockReadGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockReadGuard")]
+pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds
+ /// immutability until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+ /// covariant over `T`, just like we would have with `&T`.
+ data: NonNull<T>,
+ /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+ inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] and [`try_write`] methods
+/// on [`RwLock`].
+///
+/// [`write`]: RwLock::write
+/// [`try_write`]: RwLock::try_write
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a RwLockWriteGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+#[cfg_attr(not(test), rustc_diagnostic_item = "NonPoisonRwLockWriteGuard")]
+pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A reference to the [`RwLock`] that we have write-locked.
+ lock: &'rwlock RwLock<T>,
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for RwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockReadGuard`].
+///
+/// [`map`]: RwLockReadGuard::map
+/// [`filter_map`]: RwLockReadGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockReadGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only
+ /// holds immutability until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+ /// covariant over `T`, just like we would have with `&T`.
+ data: NonNull<T>,
+ /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+ inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockReadGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped, which can point to a subfield of the protected data.
+///
+/// This structure is created by the [`map`] and [`filter_map`] methods
+/// on [`RwLockWriteGuard`].
+///
+/// [`map`]: RwLockWriteGuard::map
+/// [`filter_map`]: RwLockWriteGuard::filter_map
+#[must_use = "if unused the RwLock will immediately unlock"]
+#[must_not_suspend = "holding a MappedRwLockWriteGuard across suspend \
+ points can cause deadlocks, delays, \
+ and cause Futures to not implement `Send`"]
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+#[clippy::has_significant_drop]
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard` instance only
+ /// holds uniqueness until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
+ data: NonNull<T>,
+ /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+ /// enforce the correct invariance over `T`.
+ _variance: PhantomData<&'rwlock mut T>,
+ /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+ inner_lock: &'rwlock sys::RwLock,
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+impl<T> RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let lock = RwLock::new(5);
+ /// ```
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ #[inline]
+ pub const fn new(t: T) -> RwLock<T> {
+ RwLock { inner: sys::RwLock::new(), data: UnsafeCell::new(t) }
+ }
+
+ /// Returns the contained value by cloning it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ /// #![feature(lock_value_accessors)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let mut lock = RwLock::new(7);
+ ///
+ /// assert_eq!(lock.get_cloned(), 7);
+ /// ```
+ #[unstable(feature = "lock_value_accessors", issue = "133407")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn get_cloned(&self) -> T
+ where
+ T: Clone,
+ {
+ self.read().clone()
+ }
+
+ /// Sets the contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ /// #![feature(lock_value_accessors)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let mut lock = RwLock::new(7);
+ ///
+ /// assert_eq!(lock.get_cloned(), 7);
+ /// lock.set(11);
+ /// assert_eq!(lock.get_cloned(), 11);
+ /// ```
+ #[unstable(feature = "lock_value_accessors", issue = "133407")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn set(&self, value: T) {
+ if mem::needs_drop::<T>() {
+ // If the contained value has a non-trivial destructor, we
+ // call that destructor after the lock has been released.
+ drop(self.replace(value))
+ } else {
+ *self.write() = value;
+ }
+ }
+
+ /// Replaces the contained value with `value`, and returns the old contained value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ /// #![feature(lock_value_accessors)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let mut lock = RwLock::new(7);
+ ///
+ /// assert_eq!(lock.replace(11), 7);
+ /// assert_eq!(lock.get_cloned(), 11);
+ /// ```
+ #[unstable(feature = "lock_value_accessors", issue = "133407")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn replace(&self, value: T) -> T {
+ let mut guard = self.write();
+ mem::replace(&mut *guard, value)
+ }
+}
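A doc-test-style sketch of the accessors above; note that `set` on a type with a destructor routes through `replace`, so the old value is dropped only after the write guard is released.

```rust
#![feature(nonpoison_rwlock)]
#![feature(lock_value_accessors)]

use std::sync::nonpoison::RwLock;

fn main() {
    let lock = RwLock::new(String::from("old"));

    // `String` has a destructor, so `set` drops the old value after unlock.
    lock.set(String::from("new"));

    // `replace` returns the previous value instead of dropping it.
    assert_eq!(lock.replace(String::from("newer")), "new");
    assert_eq!(lock.get_cloned(), "newer");
}
```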
+
+impl<T: ?Sized> RwLock<T> {
+ /// Locks this `RwLock` with shared read access, blocking the current thread
+ /// until it can be acquired.
+ ///
+ /// The calling thread will be blocked until there are no more writers which
+ /// hold the lock. There may be other readers currently inside the lock when
+ /// this method returns. This method does not provide any guarantees with
+ /// respect to the ordering of whether contentious readers or writers will
+ /// acquire the lock first.
+ ///
+ /// Returns an RAII guard which will release this thread's shared access
+ /// once it is dropped.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic when called if the lock is already held by the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::sync::nonpoison::RwLock;
+ /// use std::thread;
+ ///
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = Arc::clone(&lock);
+ ///
+ /// let n = lock.read();
+ /// assert_eq!(*n, 1);
+ ///
+ /// thread::spawn(move || {
+ /// let r = c_lock.read();
+ /// }).join().unwrap();
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn read(&self) -> RwLockReadGuard<'_, T> {
+ unsafe {
+ self.inner.read();
+ RwLockReadGuard::new(self)
+ }
+ }
+
+ /// Attempts to acquire this `RwLock` with shared read access.
+ ///
+ /// If the access could not be granted at this time, then `Err` is returned.
+ /// Otherwise, an RAII guard is returned which will release the shared access
+ /// when it is dropped.
+ ///
+ /// This function does not block.
+ ///
+ /// This function does not provide any guarantees with respect to the ordering
+ /// of whether contentious readers or writers will acquire the lock first.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked exclusively.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// match lock.try_read() {
+ /// Ok(n) => assert_eq!(*n, 1),
+ /// Err(_) => unreachable!(),
+ /// };
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
+ unsafe {
+ if self.inner.try_read() { Ok(RwLockReadGuard::new(self)) } else { Err(WouldBlock) }
+ }
+ }
+
+ /// Locks this `RwLock` with exclusive write access, blocking the current
+ /// thread until it can be acquired.
+ ///
+ /// This function will not return while other writers or other readers
+ /// currently have access to the lock.
+ ///
+ /// Returns an RAII guard which will drop the write access of this `RwLock`
+ /// when dropped.
+ ///
+ /// # Panics
+ ///
+ /// This function might panic when called if the lock is already held by the current thread.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// let mut n = lock.write();
+ /// *n = 2;
+ ///
+ /// assert!(lock.try_read().is_err());
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn write(&self) -> RwLockWriteGuard<'_, T> {
+ unsafe {
+ self.inner.write();
+ RwLockWriteGuard::new(self)
+ }
+ }
+
+ /// Attempts to lock this `RwLock` with exclusive write access.
+ ///
+ /// If the lock could not be acquired at this time, then `Err` is returned.
+ /// Otherwise, an RAII guard is returned which will release the lock when
+ /// it is dropped.
+ ///
+ /// This function does not block.
+ ///
+ /// This function does not provide any guarantees with respect to the ordering
+ /// of whether contentious readers or writers will acquire the lock first.
+ ///
+ /// # Errors
+ ///
+ /// This function will return the [`WouldBlock`] error if the `RwLock` could
+ /// not be acquired because it was already locked.
+ ///
+ /// [`WouldBlock`]: WouldBlock
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let lock = RwLock::new(1);
+ ///
+ /// let n = lock.read();
+ /// assert_eq!(*n, 1);
+ ///
+ /// assert!(lock.try_write().is_err());
+ /// ```
+ #[inline]
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+ unsafe {
+ if self.inner.try_write() { Ok(RwLockWriteGuard::new(self)) } else { Err(WouldBlock) }
+ }
+ }
+
+ /// Consumes this `RwLock`, returning the underlying data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let lock = RwLock::new(String::new());
+ /// {
+ /// let mut s = lock.write();
+ /// *s = "modified".to_owned();
+ /// }
+ /// assert_eq!(lock.into_inner(), "modified");
+ /// ```
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn into_inner(self) -> T
+ where
+ T: Sized,
+ {
+ self.data.into_inner()
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no new locks can be acquired
+ /// while this reference exists. Note that this method does not clear any previously abandoned
+ /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ ///
+ /// use std::sync::nonpoison::RwLock;
+ ///
+ /// let mut lock = RwLock::new(0);
+ /// *lock.get_mut() = 10;
+ /// assert_eq!(*lock.read(), 10);
+ /// ```
+ #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.data.get_mut()
+ }
+
+ /// Returns a raw pointer to the underlying data.
+ ///
+ /// The returned pointer is always non-null and properly aligned, but it is
+ /// the user's responsibility to ensure that any reads and writes through it
+ /// are properly synchronized to avoid data races, and that it is not read
+ /// or written through after the lock is dropped.
+ #[unstable(feature = "rwlock_data_ptr", issue = "140368")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn data_ptr(&self) -> *mut T {
+ self.data.get()
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut d = f.debug_struct("RwLock");
+ match self.try_read() {
+ Ok(guard) => {
+ d.field("data", &&*guard);
+ }
+ Err(WouldBlock) => {
+ d.field("data", &format_args!(""));
+ }
+ }
+ d.finish_non_exhaustive()
+ }
+}
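The `Debug` impl above uses `try_read`, so formatting never blocks; a sketch of what callers see:

```rust
#![feature(nonpoison_rwlock)]

use std::sync::nonpoison::RwLock;

fn main() {
    let lock = RwLock::new(5);

    // Unlocked: the data is shown.
    println!("{lock:?}"); // RwLock { data: 5, .. }

    // Write-locked: `try_read` fails, so a placeholder is printed instead
    // of blocking the formatting thread.
    let _guard = lock.write();
    println!("{lock:?}"); // RwLock { data: <locked>, .. }
}
```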
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: Default> Default for RwLock<T> {
+ /// Creates a new `RwLock<T>`, with the `Default` value for T.
+ fn default() -> RwLock<T> {
+ RwLock::new(Default::default())
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T> From<T> for RwLock<T> {
+ /// Creates a new instance of an `RwLock<T>` which is unlocked.
+ /// This is equivalent to [`RwLock::new`].
+ fn from(t: T) -> Self {
+ RwLock::new(t)
+ }
+}
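A short sketch exercising the `Default` and `From` impls above:

```rust
#![feature(nonpoison_rwlock)]

use std::sync::nonpoison::RwLock;

fn main() {
    let a: RwLock<u32> = RwLock::default();
    let b: RwLock<u32> = RwLock::from(7);

    assert_eq!(*a.read(), 0);
    assert_eq!(*b.read(), 7);
}
```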
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+ /// Creates a new instance of `RwLockReadGuard` from a `RwLock`.
+ ///
+ /// # Safety
+ ///
+ /// This function is safe if and only if the same thread has successfully and safely called
+ /// `lock.inner.read()`, `lock.inner.try_read()`, or `lock.inner.downgrade()` before
+ /// instantiating this object.
+ unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockReadGuard<'rwlock, T> {
+ RwLockReadGuard {
+ data: unsafe { NonNull::new_unchecked(lock.data.get()) },
+ inner_lock: &lock.inner,
+ }
+ }
+
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g.
+ /// an enum variant.
+ ///
+ /// The `RwLock` is already locked for reading, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockReadGuard::map(...)`. A method would interfere with methods of
+ /// the same name on the contents of the `RwLockReadGuard` used through
+ /// `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+ where
+ F: FnOnce(&T) -> &U,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+ let orig = ManuallyDrop::new(orig);
+ MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+ }
+
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data. The
+ /// original guard is returned as an `Err(...)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RwLock` is already locked for reading, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockReadGuard::filter_map(...)`. A method would interfere with methods
+ /// of the same name on the contents of the `RwLockReadGuard` used through
+ /// `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ match f(unsafe { orig.data.as_ref() }) {
+ Some(data) => {
+ let data = NonNull::from(data);
+ let orig = ManuallyDrop::new(orig);
+ Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+ }
+ None => Err(orig),
+ }
+ }
+}
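A sketch of `RwLockReadGuard::map`, narrowing a guard to a single field while the read lock stays held (the `Config` type is just for illustration):

```rust
#![feature(nonpoison_rwlock)]
#![feature(mapped_lock_guards)]

use std::sync::nonpoison::{RwLock, RwLockReadGuard};

struct Config {
    name: String,
    retries: u32,
}

fn main() {
    let lock = RwLock::new(Config { name: "svc".to_string(), retries: 3 });

    // Multiple mapped read guards can coexist, each borrowing one field.
    let name = RwLockReadGuard::map(lock.read(), |c| &c.name);
    let retries = RwLockReadGuard::map(lock.read(), |c| &c.retries);

    assert_eq!(&*name, "svc");
    assert_eq!(*retries, 3);
}
```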
+
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+ /// Creates a new instance of `RwLockWriteGuard` from a `RwLock`.
+ ///
+ /// # Safety
+ ///
+ /// This function is safe if and only if the same thread has successfully and safely called
+ /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade()` before
+ /// instantiating this object.
+ unsafe fn new(lock: &'rwlock RwLock<T>) -> RwLockWriteGuard<'rwlock, T> {
+ RwLockWriteGuard { lock }
+ }
+
+ /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
+ ///
+ /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so
+ /// this method cannot fail.
+ ///
+ /// After downgrading, other readers will be allowed to read the protected data.
+ ///
+ /// # Examples
+ ///
+ /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`].
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ /// #![feature(rwlock_downgrade)]
+ ///
+ /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard};
+ ///
+ /// let rw = RwLock::new(0);
+ ///
+ /// let mut write_guard = rw.write();
+ /// *write_guard = 42;
+ ///
+ /// let read_guard = RwLockWriteGuard::downgrade(write_guard);
+ /// assert_eq!(42, *read_guard);
+ /// ```
+ ///
+ /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into
+ /// shared mode. This means that it is impossible for another writing thread to get in between a
+ /// thread calling `downgrade` and any reads it performs after downgrading.
+ ///
+ /// ```
+ /// #![feature(nonpoison_rwlock)]
+ /// #![feature(rwlock_downgrade)]
+ ///
+ /// use std::sync::Arc;
+ /// use std::sync::nonpoison::{RwLock, RwLockWriteGuard};
+ ///
+ /// let rw = Arc::new(RwLock::new(1));
+ ///
+ /// // Put the lock in write mode.
+ /// let mut main_write_guard = rw.write();
+ ///
+ /// let rw_clone = rw.clone();
+ /// let evil_handle = std::thread::spawn(move || {
+ /// // This will not return until the main thread drops the `main_read_guard`.
+ /// let mut evil_guard = rw_clone.write();
+ ///
+ /// assert_eq!(*evil_guard, 2);
+ /// *evil_guard = 3;
+ /// });
+ ///
+ /// *main_write_guard = 2;
+ ///
+ /// // Atomically downgrade the write guard into a read guard.
+ /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+ ///
+ /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data.
+ /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic");
+ /// #
+ /// # drop(main_read_guard);
+ /// # evil_handle.join().unwrap();
+ /// #
+ /// # let final_check = rw.read();
+ /// # assert_eq!(*final_check, 3);
+ /// ```
+ #[unstable(feature = "rwlock_downgrade", issue = "128203")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> {
+ let lock = s.lock;
+
+ // We don't want to call the destructor since that calls `write_unlock`.
+ forget(s);
+
+ // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
+ // mode, satisfying the `downgrade` contract.
+ unsafe { lock.inner.downgrade() };
+
+ // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
+ unsafe { RwLockReadGuard::new(lock) }
+ }
+
+ /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g.
+ /// an enum variant.
+ ///
+ /// The `RwLock` is already locked for writing, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockWriteGuard::map(...)`. A method would interfere with methods of
+ /// the same name on the contents of the `RwLockWriteGuard` used through
+ /// `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
+ let orig = ManuallyDrop::new(orig);
+ MappedRwLockWriteGuard { data, inner_lock: &orig.lock.inner, _variance: PhantomData }
+ }
+
+ /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data. The
+ /// original guard is returned as an `Err(...)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RwLock` is already locked for writing, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockWriteGuard::filter_map(...)`. A method would interfere with methods
+ /// of the same name on the contents of the `RwLockWriteGuard` used through
+ /// `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ match f(unsafe { &mut *orig.lock.data.get() }) {
+ Some(data) => {
+ let data = NonNull::from(data);
+ let orig = ManuallyDrop::new(orig);
+ Ok(MappedRwLockWriteGuard {
+ data,
+ inner_lock: &orig.lock.inner,
+ _variance: PhantomData,
+ })
+ }
+ None => Err(orig),
+ }
+ }
+}
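A sketch of the `filter_map` `Err` path on a write guard: when the closure declines, the original guard comes back intact, so the lock is still held and usable.

```rust
#![feature(nonpoison_rwlock)]
#![feature(mapped_lock_guards)]

use std::sync::nonpoison::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(vec![1, 2, 3]);

    match RwLockWriteGuard::filter_map(lock.write(), |v| v.get_mut(10)) {
        Ok(_) => unreachable!("index 10 is out of bounds"),
        // The returned guard still holds the write lock.
        Err(mut guard) => guard.push(4),
    }

    assert_eq!(lock.read().len(), 4);
}
```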
+
+impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> {
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
+ /// e.g. an enum variant.
+ ///
+ /// The `RwLock` is already locked for reading, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+ /// used through `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+ where
+ F: FnOnce(&T) -> &U,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+ let orig = ManuallyDrop::new(orig);
+ MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+ }
+
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
+ /// The original guard is returned as an `Err(...)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RwLock` is already locked for reading, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+ /// used through `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ match f(unsafe { orig.data.as_ref() }) {
+ Some(data) => {
+ let data = NonNull::from(data);
+ let orig = ManuallyDrop::new(orig);
+ Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+ }
+ None => Err(orig),
+ }
+ }
+}
+
+impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> {
+ /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data,
+ /// e.g. an enum variant.
+ ///
+ /// The `RwLock` is already locked for writing, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockWriteGuard::map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
+ /// used through `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
+ let orig = ManuallyDrop::new(orig);
+ MappedRwLockWriteGuard { data, inner_lock: orig.inner_lock, _variance: PhantomData }
+ }
+
+ /// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data.
+ /// The original guard is returned as an `Err(...)` if the closure returns
+ /// `None`.
+ ///
+ /// The `RwLock` is already locked for writing, so this cannot fail.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockWriteGuard::filter_map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockWriteGuard`
+ /// used through `Deref`.
+ ///
+ /// # Panics
+ ///
+ /// If the closure panics, the guard will be dropped (unlocked).
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ // #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+ pub fn filter_map<U, F>(
+ mut orig: Self,
+ f: F,
+ ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ match f(unsafe { orig.data.as_mut() }) {
+ Some(data) => {
+ let data = NonNull::from(data);
+ let orig = ManuallyDrop::new(orig);
+ Ok(MappedRwLockWriteGuard {
+ data,
+ inner_lock: orig.inner_lock,
+ _variance: PhantomData,
+ })
+ }
+ None => Err(orig),
+ }
+ }
+}
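Because `map` allows `U: ?Sized`, mapped guards can be narrowed repeatedly, including down to unsized targets such as `str`; a sketch:

```rust
#![feature(nonpoison_rwlock)]
#![feature(mapped_lock_guards)]

use std::sync::nonpoison::{MappedRwLockWriteGuard, RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new((String::from("hi"), 0u32));

    // Tuple -> String -> str, all under the same write lock.
    let field = RwLockWriteGuard::map(lock.write(), |pair| &mut pair.0);
    let mut s = MappedRwLockWriteGuard::map(field, |s| s.as_mut_str());
    s.make_ascii_uppercase();
    drop(s);

    assert_eq!(lock.read().0, "HI");
}
```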
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe {
+ self.inner_lock.read_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe {
+ self.lock.inner.write_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe {
+ self.inner_lock.read_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe {
+ self.inner_lock.write_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_mut() }
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+// #[unstable(feature = "nonpoison_rwlock", issue = "134645")]
+impl<T: fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
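Taken together, the new module mirrors `poison::RwLock` minus the poison plumbing; a side-by-side sketch of the two APIs:

```rust
#![feature(nonpoison_rwlock)]

use std::sync::RwLock as PoisonRwLock;
use std::sync::nonpoison::RwLock;

fn main() {
    let poisoning = PoisonRwLock::new(1);
    let nonpoisoning = RwLock::new(1);

    // The poisoning lock surfaces panics-while-locked through a `Result`...
    assert_eq!(*poisoning.read().unwrap(), 1);

    // ...whereas the nonpoison lock returns its guard directly.
    assert_eq!(*nonpoisoning.read(), 1);
}
```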
diff --git a/library/std/src/sync/poison/rwlock.rs b/library/std/src/sync/poison/rwlock.rs
index 2c92602bc878f..0a463f3f9c7e3 100644
--- a/library/std/src/sync/poison/rwlock.rs
+++ b/library/std/src/sync/poison/rwlock.rs
@@ -80,16 +80,24 @@ use crate::sys::sync as sys;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "RwLock")]
pub struct RwLock<T: ?Sized> {
+ /// The inner [`sys::RwLock`] that synchronizes thread access to the protected data.
inner: sys::RwLock,
+ /// A flag denoting if this `RwLock` has been poisoned.
poison: poison::Flag,
+ /// The lock-protected data.
data: UnsafeCell<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Guards
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
@@ -105,13 +113,15 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockReadGuard")]
-pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
- // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
- // `RwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
- // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
- // is preferable over `const* T` to allow for niche optimization.
+pub struct RwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `RwLockReadGuard` instance only holds
+ /// immutability until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+ /// covariant over `T`, just like we would have with `&T`.
data: NonNull<T>,
- inner_lock: &'a sys::RwLock,
+ /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+ inner_lock: &'rwlock sys::RwLock,
}
#[stable(feature = "rust1", since = "1.0.0")]
@@ -135,8 +145,10 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
#[clippy::has_significant_drop]
#[cfg_attr(not(test), rustc_diagnostic_item = "RwLockWriteGuard")]
-pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
- lock: &'a RwLock<T>,
+pub struct RwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A reference to the [`RwLock`] that we have write-locked.
+ lock: &'rwlock RwLock<T>,
+ /// The poison guard. See the [`poison`] module for more information.
poison: poison::Guard,
}
@@ -160,13 +172,15 @@ unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
and cause Futures to not implement `Send`"]
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
#[clippy::has_significant_drop]
-pub struct MappedRwLockReadGuard<'a, T: ?Sized + 'a> {
- // NB: we use a pointer instead of `&'a T` to avoid `noalias` violations, because a
- // `MappedRwLockReadGuard` argument doesn't hold immutability for its whole scope, only until it drops.
- // `NonNull` is also covariant over `T`, just like we would have with `&T`. `NonNull`
- // is preferable over `const* T` to allow for niche optimization.
+pub struct MappedRwLockReadGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockReadGuard` instance only
+ /// holds immutability until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations. `NonNull` is also
+ /// covariant over `T`, just like we would have with `&T`.
data: NonNull<T>,
- inner_lock: &'a sys::RwLock,
+ /// A reference to the internal [`sys::RwLock`] that we have read-locked.
+ inner_lock: &'rwlock sys::RwLock,
}
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -189,16 +203,21 @@ unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockReadGuard<'_, T> {}
and cause Futures to not implement `Send`"]
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
#[clippy::has_significant_drop]
-pub struct MappedRwLockWriteGuard<'a, T: ?Sized + 'a> {
- // NB: we use a pointer instead of `&'a mut T` to avoid `noalias` violations, because a
- // `MappedRwLockWriteGuard` argument doesn't hold uniqueness for its whole scope, only until it drops.
- // `NonNull` is covariant over `T`, so we add a `PhantomData<&'a mut T>` field
- // below for the correct variance over `T` (invariance).
+pub struct MappedRwLockWriteGuard<'rwlock, T: ?Sized + 'rwlock> {
+ /// A pointer to the data protected by the `RwLock`. Note that we use a pointer here instead of
+ /// `&'rwlock T` to avoid `noalias` violations, because a `MappedRwLockWriteGuard` instance only
+ /// holds uniqueness until it drops, not for its whole scope.
+ /// `NonNull` is preferable over `*const T` to allow for niche optimizations.
data: NonNull<T>,
- inner_lock: &'a sys::RwLock,
- poison_flag: &'a poison::Flag,
- poison: poison::Guard,
- _variance: PhantomData<&'a mut T>,
+ /// `NonNull` is covariant over `T`, so we add a `PhantomData<&'rwlock mut T>` field here to
+ /// enforce the correct invariance over `T`.
+ _variance: PhantomData<&'rwlock mut T>,
+ /// A reference to the internal [`sys::RwLock`] that we have write-locked.
+ inner_lock: &'rwlock sys::RwLock,
+ /// A reference to the original `RwLock`'s poison state.
+ poison_flag: &'rwlock poison::Flag,
+ /// The poison guard. See the [`poison`] module for more information.
+ poison_guard: poison::Guard,
}
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
@@ -207,6 +226,10 @@ impl<T: ?Sized> !Send for MappedRwLockWriteGuard<'_, T> {}
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
unsafe impl<T: ?Sized + Sync> Sync for MappedRwLockWriteGuard<'_, T> {}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Implementations
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
impl<T> RwLock<T> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
@@ -611,8 +634,8 @@ impl<T: ?Sized> RwLock<T> {
///
/// Since this call borrows the `RwLock` mutably, no actual locking needs to
/// take place -- the mutable borrow statically guarantees no new locks can be acquired
- /// while this reference exists. Note that this method does not clear any previously abandoned locks
- /// (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
+ /// while this reference exists. Note that this method does not clear any previously abandoned
+ /// locks (e.g., via [`forget()`] on a [`RwLockReadGuard`] or [`RwLockWriteGuard`]).
///
/// # Errors
///
@@ -700,177 +723,7 @@ impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
inner_lock: &lock.inner,
})
}
-}
-
-impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
- /// Creates a new instance of `RwLockWriteGuard` from a `RwLock`.
- // SAFETY: if and only if `lock.inner.write()` (or `lock.inner.try_write()`) has been
- // successfully called from the same thread before instantiating this object.
- unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
- poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
- }
-}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[stable(feature = "std_guard_impls", since = "1.20.0")]
-impl<T: fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[stable(feature = "std_debug", since = "1.16.0")]
-impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[stable(feature = "std_guard_impls", since = "1.20.0")]
-impl<T: fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- (**self).fmt(f)
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
- unsafe { self.data.as_ref() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
- unsafe { &*self.lock.data.get() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
- unsafe { &mut *self.lock.data.get() }
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- unsafe { self.data.as_ref() }
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- unsafe { self.data.as_ref() }
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- unsafe { self.data.as_mut() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
- fn drop(&mut self) {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
- unsafe {
- self.inner_lock.read_unlock();
- }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
- fn drop(&mut self) {
- self.lock.poison.done(&self.poison);
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
- unsafe {
- self.lock.inner.write_unlock();
- }
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
- fn drop(&mut self) {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- unsafe {
- self.inner_lock.read_unlock();
- }
- }
-}
-
-#[unstable(feature = "mapped_lock_guards", issue = "117108")]
-impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
- fn drop(&mut self) {
- self.poison_flag.done(&self.poison);
- // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- unsafe {
- self.inner_lock.write_unlock();
- }
- }
-}
-impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
/// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data, e.g.
/// an enum variant.
///
@@ -883,17 +736,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
///
/// # Panics
///
- /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
+ /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+ /// poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
where
F: FnOnce(&T) -> &U,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
let orig = ManuallyDrop::new(orig);
MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
@@ -912,17 +766,18 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
///
/// # Panics
///
- /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
+ /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+ /// poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
where
F: FnOnce(&T) -> Option<&U>,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
match f(unsafe { orig.data.as_ref() }) {
Some(data) => {
let data = NonNull::from(data);
@@ -934,71 +789,95 @@ impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
}
}
-impl<'a, T: ?Sized> MappedRwLockReadGuard<'a, T> {
- /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
- /// e.g. an enum variant.
- ///
- /// The `RwLock` is already locked for reading, so this cannot fail.
- ///
- /// This is an associated function that needs to be used as
- /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
- /// methods of the same name on the contents of the `MappedRwLockReadGuard`
- /// used through `Deref`.
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+ /// Creates a new instance of `RwLockWriteGuard` from a `RwLock`.
///
- /// # Panics
+ /// # Safety
///
- /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
- #[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'a, U>
- where
- F: FnOnce(&T) -> &U,
- U: ?Sized,
- {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
- let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
- let orig = ManuallyDrop::new(orig);
- MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+ /// This function is safe if and only if the same thread has successfully and safely called
+ /// `lock.inner.write()`, `lock.inner.try_write()`, or `lock.inner.try_upgrade()` before
+ /// instantiating this object.
+ unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockWriteGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.guard(), |guard| RwLockWriteGuard { lock, poison: guard })
}
- /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
- /// The original guard is returned as an `Err(...)` if the closure returns
- /// `None`.
+ /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
///
- /// The `RwLock` is already locked for reading, so this cannot fail.
+ /// Since we have the `RwLockWriteGuard`, the [`RwLock`] must already be locked for writing, so
+ /// this method cannot fail.
///
- /// This is an associated function that needs to be used as
- /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
- /// methods of the same name on the contents of the `MappedRwLockReadGuard`
- /// used through `Deref`.
+ /// After downgrading, other readers will be allowed to read the protected data.
///
- /// # Panics
+ /// # Examples
///
- /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be poisoned.
- #[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
- where
- F: FnOnce(&T) -> Option<&U>,
- U: ?Sized,
- {
- // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
- // was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
- match f(unsafe { orig.data.as_ref() }) {
- Some(data) => {
- let data = NonNull::from(data);
- let orig = ManuallyDrop::new(orig);
- Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
- }
- None => Err(orig),
- }
+ /// `downgrade` takes ownership of the `RwLockWriteGuard` and returns a [`RwLockReadGuard`].
+ ///
+ /// ```
+ /// #![feature(rwlock_downgrade)]
+ ///
+ /// use std::sync::{RwLock, RwLockWriteGuard};
+ ///
+ /// let rw = RwLock::new(0);
+ ///
+ /// let mut write_guard = rw.write().unwrap();
+ /// *write_guard = 42;
+ ///
+ /// let read_guard = RwLockWriteGuard::downgrade(write_guard);
+ /// assert_eq!(42, *read_guard);
+ /// ```
+ ///
+ /// `downgrade` will _atomically_ change the state of the [`RwLock`] from exclusive mode into
+ /// shared mode. This means that it is impossible for another writing thread to get in between a
+ /// thread calling `downgrade` and any reads it performs after downgrading.
+ ///
+ /// ```
+ /// #![feature(rwlock_downgrade)]
+ ///
+ /// use std::sync::{Arc, RwLock, RwLockWriteGuard};
+ ///
+ /// let rw = Arc::new(RwLock::new(1));
+ ///
+ /// // Put the lock in write mode.
+ /// let mut main_write_guard = rw.write().unwrap();
+ ///
+ /// let rw_clone = rw.clone();
+ /// let evil_handle = std::thread::spawn(move || {
+ /// // This will not return until the main thread drops the `main_read_guard`.
+ /// let mut evil_guard = rw_clone.write().unwrap();
+ ///
+ /// assert_eq!(*evil_guard, 2);
+ /// *evil_guard = 3;
+ /// });
+ ///
+ /// *main_write_guard = 2;
+ ///
+ /// // Atomically downgrade the write guard into a read guard.
+ /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+ ///
+ /// // Since `downgrade` is atomic, the writer thread cannot have changed the protected data.
+ /// assert_eq!(*main_read_guard, 2, "`downgrade` was not atomic");
+ /// #
+ /// # drop(main_read_guard);
+ /// # evil_handle.join().unwrap();
+ /// #
+ /// # let final_check = rw.read().unwrap();
+ /// # assert_eq!(*final_check, 3);
+ /// ```
+ #[unstable(feature = "rwlock_downgrade", issue = "128203")]
+ pub fn downgrade(s: Self) -> RwLockReadGuard<'rwlock, T> {
+ let lock = s.lock;
+
+ // We don't want to call the destructor since that calls `write_unlock`.
+ forget(s);
+
+ // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
+ // mode, satisfying the `downgrade` contract.
+ unsafe { lock.inner.downgrade() };
+
+ // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
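+ // `new` can only fail with a `PoisonError`, and a downgrade must still yield a guard, so
+ // strip the error and keep the read guard it wraps.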
+ unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
}
-}
-impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
/// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data, e.g.
/// an enum variant.
///
@@ -1013,22 +892,22 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
///
/// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
where
F: FnOnce(&mut T) -> &mut U,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
let data = NonNull::from(f(unsafe { &mut *orig.lock.data.get() }));
let orig = ManuallyDrop::new(orig);
MappedRwLockWriteGuard {
data,
inner_lock: &orig.lock.inner,
poison_flag: &orig.lock.poison,
- poison: orig.poison.clone(),
+ poison_guard: orig.poison.clone(),
_variance: PhantomData,
}
}
@@ -1048,15 +927,15 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
///
/// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
where
F: FnOnce(&mut T) -> Option<&mut U>,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
match f(unsafe { &mut *orig.lock.data.get() }) {
Some(data) => {
let data = NonNull::from(data);
@@ -1065,78 +944,82 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
data,
inner_lock: &orig.lock.inner,
poison_flag: &orig.lock.poison,
- poison: orig.poison.clone(),
+ poison_guard: orig.poison.clone(),
_variance: PhantomData,
})
}
None => Err(orig),
}
}
+}
- /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
- ///
- /// This method will atomically change the state of the [`RwLock`] from exclusive mode into
- /// shared mode. This means that it is impossible for a writing thread to get in between a
- /// thread calling `downgrade` and the same thread reading whatever it wrote while it had the
- /// [`RwLock`] in write mode.
- ///
- /// Note that since we have the `RwLockWriteGuard`, we know that the [`RwLock`] is already
- /// locked for writing, so this method cannot fail.
- ///
- /// # Example
- ///
- /// ```
- /// #![feature(rwlock_downgrade)]
- /// use std::sync::{Arc, RwLock, RwLockWriteGuard};
- ///
- /// // The inner value starts as 0.
- /// let rw = Arc::new(RwLock::new(0));
+impl<'rwlock, T: ?Sized> MappedRwLockReadGuard<'rwlock, T> {
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data,
+ /// e.g. an enum variant.
///
- /// // Put the lock in write mode.
- /// let mut main_write_guard = rw.write().unwrap();
+ /// The `RwLock` is already locked for reading, so this cannot fail.
///
- /// let evil = rw.clone();
- /// let handle = std::thread::spawn(move || {
- /// // This will not return until the main thread drops the `main_read_guard`.
- /// let mut evil_guard = evil.write().unwrap();
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockReadGuard::map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+ /// used through `Deref`.
///
- /// assert_eq!(*evil_guard, 1);
- /// *evil_guard = 2;
- /// });
+ /// # Panics
///
- /// // After spawning the writer thread, set the inner value to 1.
- /// *main_write_guard = 1;
+ /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+ /// poisoned.
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ pub fn map<U, F>(orig: Self, f: F) -> MappedRwLockReadGuard<'rwlock, U>
+ where
+ F: FnOnce(&T) -> &U,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ let data = NonNull::from(f(unsafe { orig.data.as_ref() }));
+ let orig = ManuallyDrop::new(orig);
+ MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock }
+ }
+
+ /// Makes a [`MappedRwLockReadGuard`] for a component of the borrowed data.
+ /// The original guard is returned as an `Err(...)` if the closure returns
+ /// `None`.
///
- /// // Atomically downgrade the write guard into a read guard.
- /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+ /// The `RwLock` is already locked for reading, so this cannot fail.
///
- /// // Since `downgrade` is atomic, the writer thread cannot have set the inner value to 2.
- /// assert_eq!(*main_read_guard, 1, "`downgrade` was not atomic");
+ /// This is an associated function that needs to be used as
+ /// `MappedRwLockReadGuard::filter_map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the `MappedRwLockReadGuard`
+ /// used through `Deref`.
///
- /// // Clean up everything now
- /// drop(main_read_guard);
- /// handle.join().unwrap();
+ /// # Panics
///
- /// let final_check = rw.read().unwrap();
- /// assert_eq!(*final_check, 2);
- /// ```
- #[unstable(feature = "rwlock_downgrade", issue = "128203")]
- pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
- let lock = s.lock;
-
- // We don't want to call the destructor since that calls `write_unlock`.
- forget(s);
-
- // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
- // mode, satisfying the `downgrade` contract.
- unsafe { lock.inner.downgrade() };
-
- // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
- unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
+ /// If the closure panics, the guard will be dropped (unlocked) and the RwLock will not be
+ /// poisoned.
+ #[unstable(feature = "mapped_lock_guards", issue = "117108")]
+ pub fn filter_map<U, F>(orig: Self, f: F) -> Result<MappedRwLockReadGuard<'rwlock, U>, Self>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ U: ?Sized,
+ {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
+ match f(unsafe { orig.data.as_ref() }) {
+ Some(data) => {
+ let data = NonNull::from(data);
+ let orig = ManuallyDrop::new(orig);
+ Ok(MappedRwLockReadGuard { data, inner_lock: &orig.inner_lock })
+ }
+ None => Err(orig),
+ }
}
}
-impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
+impl<'rwlock, T: ?Sized> MappedRwLockWriteGuard<'rwlock, T> {
/// Makes a [`MappedRwLockWriteGuard`] for a component of the borrowed data,
/// e.g. an enum variant.
///
@@ -1151,22 +1034,22 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
///
/// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
+ pub fn map<U, F>(mut orig: Self, f: F) -> MappedRwLockWriteGuard<'rwlock, U>
where
F: FnOnce(&mut T) -> &mut U,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
let data = NonNull::from(f(unsafe { orig.data.as_mut() }));
let orig = ManuallyDrop::new(orig);
MappedRwLockWriteGuard {
data,
inner_lock: orig.inner_lock,
poison_flag: orig.poison_flag,
- poison: orig.poison.clone(),
+ poison_guard: orig.poison_guard.clone(),
_variance: PhantomData,
}
}
@@ -1186,15 +1069,18 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
///
/// If the closure panics, the guard will be dropped (unlocked) and the RwLock will be poisoned.
#[unstable(feature = "mapped_lock_guards", issue = "117108")]
- pub fn filter_map<U, F>(mut orig: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
+ pub fn filter_map<U, F>(
+ mut orig: Self,
+ f: F,
+ ) -> Result<MappedRwLockWriteGuard<'rwlock, U>, Self>
where
F: FnOnce(&mut T) -> Option<&mut U>,
U: ?Sized,
{
// SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
// was created, and have been upheld throughout `map` and/or `filter_map`.
- // The signature of the closure guarantees that it will not "leak" the lifetime of the reference
- // passed to it. If the closure panics, the guard will be dropped.
+ // The signature of the closure guarantees that it will not "leak" the lifetime of the
+ // reference passed to it. If the closure panics, the guard will be dropped.
match f(unsafe { orig.data.as_mut() }) {
Some(data) => {
let data = NonNull::from(data);
@@ -1203,7 +1089,7 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
data,
inner_lock: orig.inner_lock,
poison_flag: orig.poison_flag,
- poison: orig.poison.clone(),
+ poison_guard: orig.poison_guard.clone(),
_variance: PhantomData,
})
}
@@ -1211,3 +1097,162 @@ impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
}
}
}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe {
+ self.inner_lock.read_unlock();
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
+ fn drop(&mut self) {
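+ // Record poisoning (only if this thread is panicking) before releasing the exclusive lock.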
+ self.lock.poison.done(&self.poison);
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe {
+ self.lock.inner.write_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Drop for MappedRwLockReadGuard<'_, T> {
+ fn drop(&mut self) {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe {
+ self.inner_lock.read_unlock();
+ }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Drop for MappedRwLockWriteGuard<'_, T> {
+ fn drop(&mut self) {
+ self.poison_flag.done(&self.poison_guard);
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe {
+ self.inner_lock.write_unlock();
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when created.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when created.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Deref for MappedRwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockReadGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> Deref for MappedRwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_ref() }
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized> DerefMut for MappedRwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ // SAFETY: the conditions of `RwLockWriteGuard::new` were satisfied when the original guard
+ // was created, and have been upheld throughout `map` and/or `filter_map`.
+ unsafe { self.data.as_mut() }
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_debug", since = "1.16.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[stable(feature = "std_guard_impls", since = "1.20.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockReadGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for MappedRwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+#[unstable(feature = "mapped_lock_guards", issue = "117108")]
+impl<T: ?Sized + fmt::Display> fmt::Display for MappedRwLockWriteGuard<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
diff --git a/library/std/src/sys/fd/unix.rs b/library/std/src/sys/fd/unix.rs
index cdca73cdca11e..a12f692e7543b 100644
--- a/library/std/src/sys/fd/unix.rs
+++ b/library/std/src/sys/fd/unix.rs
@@ -37,10 +37,10 @@ pub struct FileDesc(OwnedFd);
//
// On Apple targets however, apparently the 64-bit libc is either buggy or
// intentionally showing odd behavior by rejecting any read with a size
-// larger than or equal to INT_MAX. To handle both of these the read
-// size is capped on both platforms.
+// larger than INT_MAX. To handle both of these the read size is capped on
+// both platforms.
const READ_LIMIT: usize = if cfg!(target_vendor = "apple") {
- libc::c_int::MAX as usize - 1
+ libc::c_int::MAX as usize
} else {
libc::ssize_t::MAX as usize
};
diff --git a/library/std/src/sys/fs/unix.rs b/library/std/src/sys/fs/unix.rs
index 7ee9f3c445a04..0d710a4b2a6c8 100644
--- a/library/std/src/sys/fs/unix.rs
+++ b/library/std/src/sys/fs/unix.rs
@@ -1265,6 +1265,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
))]
pub fn lock(&self) -> io::Result<()> {
@@ -1278,6 +1279,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
)))]
pub fn lock(&self) -> io::Result<()> {
@@ -1290,6 +1292,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
))]
pub fn lock_shared(&self) -> io::Result<()> {
@@ -1303,6 +1306,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
)))]
pub fn lock_shared(&self) -> io::Result<()> {
@@ -1315,6 +1319,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
))]
pub fn try_lock(&self) -> Result<(), TryLockError> {
@@ -1336,6 +1341,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
)))]
pub fn try_lock(&self) -> Result<(), TryLockError> {
@@ -1351,6 +1357,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
))]
pub fn try_lock_shared(&self) -> Result<(), TryLockError> {
@@ -1372,6 +1379,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
)))]
pub fn try_lock_shared(&self) -> Result<(), TryLockError> {
@@ -1387,6 +1395,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
))]
pub fn unlock(&self) -> io::Result<()> {
@@ -1400,6 +1409,7 @@ impl File {
target_os = "linux",
target_os = "netbsd",
target_os = "openbsd",
+ target_os = "cygwin",
target_vendor = "apple",
)))]
pub fn unlock(&self) -> io::Result<()> {
diff --git a/library/std/src/sys/pal/unix/mod.rs b/library/std/src/sys/pal/unix/mod.rs
index fede3673eb6e8..aef7ab55088d2 100644
--- a/library/std/src/sys/pal/unix/mod.rs
+++ b/library/std/src/sys/pal/unix/mod.rs
@@ -59,6 +59,30 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
}
unsafe fn sanitize_standard_fds() {
+ #[allow(dead_code, unused_variables, unused_mut)]
+ let mut opened_devnull = -1;
+ #[allow(dead_code, unused_variables, unused_mut)]
+ let mut open_devnull = || {
+ #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
+ use libc::open;
+ #[cfg(all(target_os = "linux", target_env = "gnu"))]
+ use libc::open64 as open;
+
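+ // POSIX `dup` returns the lowest free descriptor, which here is exactly the closed
+ // standard stream being repaired, so an already-opened `/dev/null` fd can be reused.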
+ if opened_devnull != -1 {
+ if libc::dup(opened_devnull) != -1 {
+ return;
+ }
+ }
+ opened_devnull = open(c"/dev/null".as_ptr(), libc::O_RDWR, 0);
+ if opened_devnull == -1 {
+ // If the stream is closed but we failed to reopen it, abort the
+ // process. Otherwise we wouldn't preserve the safety of
+ // operations on the corresponding Rust object Stdin, Stdout, or
+ // Stderr.
+ libc::abort();
+ }
+ };
+
// fast path with a single syscall for systems with poll()
#[cfg(not(any(
miri,
@@ -74,11 +98,6 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
target_vendor = "apple",
)))]
'poll: {
- #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
- use libc::open as open64;
- #[cfg(all(target_os = "linux", target_env = "gnu"))]
- use libc::open64;
-
use crate::sys::os::errno;
let pfds: &mut [_] = &mut [
libc::pollfd { fd: 0, events: 0, revents: 0 },
@@ -106,13 +125,7 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
if pfd.revents & libc::POLLNVAL == 0 {
continue;
}
- if open64(c"/dev/null".as_ptr(), libc::O_RDWR, 0) == -1 {
- // If the stream is closed but we failed to reopen it, abort the
- // process. Otherwise we wouldn't preserve the safety of
- // operations on the corresponding Rust object Stdin, Stdout, or
- // Stderr.
- libc::abort();
- }
+ open_devnull();
}
return;
}
@@ -129,21 +142,10 @@ pub unsafe fn init(argc: isize, argv: *const *const u8, sigpipe: u8) {
target_os = "vita",
)))]
{
- #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
- use libc::open as open64;
- #[cfg(all(target_os = "linux", target_env = "gnu"))]
- use libc::open64;
-
use crate::sys::os::errno;
for fd in 0..3 {
if libc::fcntl(fd, libc::F_GETFD) == -1 && errno() == libc::EBADF {
- if open64(c"/dev/null".as_ptr(), libc::O_RDWR, 0) == -1 {
- // If the stream is closed but we failed to reopen it, abort the
- // process. Otherwise we wouldn't preserve the safety of
- // operations on the corresponding Rust object Stdin, Stdout, or
- // Stderr.
- libc::abort();
- }
+ open_devnull();
}
}
}
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index 0ad014ccd3e2e..797feeb2bbb5f 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -8,8 +8,8 @@ use crate::fmt;
/// A thread local storage (TLS) key which owns its contents.
///
-/// This key uses the fastest possible implementation available to it for the
-/// target platform. It is instantiated with the [`thread_local!`] macro and the
+/// This key uses the fastest implementation available on the target platform.
+/// It is instantiated with the [`thread_local!`] macro and the
/// primary method is the [`with`] method, though there are helpers to make
/// working with [`Cell`] types easier.
///
@@ -24,10 +24,10 @@ use crate::fmt;
/// [`with`]) within a thread, and values that implement [`Drop`] get
/// destructed when a thread exits. Some platform-specific caveats apply, which
/// are explained below.
-/// Note that, should the destructor panics, the whole process will be [aborted].
+/// Note that if the destructor panics, the whole process will be [aborted].
///
/// A `LocalKey`'s initializer cannot recursively depend on itself. Using a
-/// `LocalKey` in this way may cause panics, aborts or infinite recursion on
+/// `LocalKey` in this way may cause panics, aborts, or infinite recursion on
/// the first call to `with`.
///
/// [aborted]: crate::process::abort
diff --git a/library/std/tests/sync/lazy_lock.rs b/library/std/tests/sync/lazy_lock.rs
index 6c14b79f2ce7c..68aeea834b4fa 100644
--- a/library/std/tests/sync/lazy_lock.rs
+++ b/library/std/tests/sync/lazy_lock.rs
@@ -33,16 +33,6 @@ fn lazy_default() {
assert_eq!(CALLED.load(SeqCst), 1);
}
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn lazy_poisoning() {
- let x: LazyCell<String> = LazyCell::new(|| panic!("kaboom"));
- for _ in 0..2 {
- let res = panic::catch_unwind(panic::AssertUnwindSafe(|| x.len()));
- assert!(res.is_err());
- }
-}
-
#[test]
#[cfg_attr(any(target_os = "emscripten", target_os = "wasi"), ignore)] // no threads
fn sync_lazy_new() {
@@ -123,16 +113,6 @@ fn static_sync_lazy_via_fn() {
assert_eq!(xs(), &vec![1, 2, 3]);
}
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn sync_lazy_poisoning() {
- let x: LazyLock<String> = LazyLock::new(|| panic!("kaboom"));
- for _ in 0..2 {
- let res = panic::catch_unwind(|| x.len());
- assert!(res.is_err());
- }
-}
-
// Check that we can infer `T` from closure's type.
#[test]
fn lazy_type_inference() {
@@ -145,17 +125,6 @@ fn is_sync_send() {
assert_traits::<LazyLock<String>>();
}
-#[test]
-#[should_panic = "has previously been poisoned"]
-fn lazy_force_mut_panic() {
- let mut lazy = LazyLock::<String>::new(|| panic!());
- panic::catch_unwind(panic::AssertUnwindSafe(|| {
- let _ = LazyLock::force_mut(&mut lazy);
- }))
- .unwrap_err();
- let _ = &*lazy;
-}
-
#[test]
fn lazy_force_mut() {
let s = "abc".to_owned();
@@ -165,3 +134,56 @@ fn lazy_force_mut() {
p.clear();
LazyLock::force_mut(&mut lazy);
}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn lazy_poisoning() {
+ let x: LazyCell<String> = LazyCell::new(|| panic!("kaboom"));
+ for _ in 0..2 {
+ let res = panic::catch_unwind(panic::AssertUnwindSafe(|| x.len()));
+ assert!(res.is_err());
+ }
+}
+
+/// Verifies that when a `LazyLock` is poisoned, it panics with the correct error message ("LazyLock
+/// instance has previously been poisoned") instead of the underlying `Once` error message.
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+#[should_panic(expected = "LazyLock instance has previously been poisoned")]
+fn lazy_lock_deref_panic() {
+ let lazy: LazyLock<String> = LazyLock::new(|| panic!("initialization failed"));
+
+ // First access will panic during initialization.
+ let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ let _ = &*lazy;
+ }));
+
+ // Second access should panic with the poisoned message.
+ let _ = &*lazy;
+}
+
+#[test]
+#[should_panic(expected = "LazyLock instance has previously been poisoned")]
+fn lazy_lock_deref_mut_panic() {
+ let mut lazy: LazyLock<String> = LazyLock::new(|| panic!("initialization failed"));
+
+ // First access will panic during initialization.
+ let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ let _ = LazyLock::force_mut(&mut lazy);
+ }));
+
+ // Second access should panic with the poisoned message.
+ let _ = &*lazy;
+}
+
+/// Verifies that when the initialization closure panics with a custom message, that message is
+/// preserved and not overridden by `LazyLock`.
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+#[should_panic(expected = "custom panic message from closure")]
+fn lazy_lock_preserves_closure_panic_message() {
+ let lazy: LazyLock<String> = LazyLock::new(|| panic!("custom panic message from closure"));
+
+ // This should panic with the original message from the closure.
+ let _ = &*lazy;
+}
diff --git a/library/std/tests/sync/lib.rs b/library/std/tests/sync/lib.rs
index 94f1fe96b6a26..f874c2ba38951 100644
--- a/library/std/tests/sync/lib.rs
+++ b/library/std/tests/sync/lib.rs
@@ -8,6 +8,7 @@
#![feature(std_internals)]
#![feature(sync_nonpoison)]
#![feature(nonpoison_mutex)]
+#![feature(nonpoison_rwlock)]
#![allow(internal_features)]
#![feature(macro_metavar_expr_concat)] // For concatenating identifiers in macros.
diff --git a/library/std/tests/sync/rwlock.rs b/library/std/tests/sync/rwlock.rs
index 1d55a1769483a..eca15d2a4adb8 100644
--- a/library/std/tests/sync/rwlock.rs
+++ b/library/std/tests/sync/rwlock.rs
@@ -29,239 +29,457 @@ fn test_needs_drop() {
assert!(mem::needs_drop::<NonCopyNeedsDrop>());
}
-#[derive(Clone, Eq, PartialEq, Debug)]
-struct Cloneable(i32);
-
-#[test]
-fn smoke() {
- let l = RwLock::new(());
- drop(l.read().unwrap());
- drop(l.write().unwrap());
- drop((l.read().unwrap(), l.read().unwrap()));
- drop(l.write().unwrap());
-}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Non-poison & Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+use super::nonpoison_and_poison_unwrap_test;
+
+nonpoison_and_poison_unwrap_test!(
+ name: smoke,
+ test_body: {
+ use locks::RwLock;
+
+ let l = RwLock::new(());
+ drop(maybe_unwrap(l.read()));
+ drop(maybe_unwrap(l.write()));
+ drop((maybe_unwrap(l.read()), maybe_unwrap(l.read())));
+ drop(maybe_unwrap(l.write()));
+ }
+);
-#[test]
// FIXME: On macOS we use a provenance-incorrect implementation and Miri
// catches that issue with a chance of around 1/1000.
// See for details.
#[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn frob() {
- const N: u32 = 10;
- const M: usize = if cfg!(miri) { 100 } else { 1000 };
+nonpoison_and_poison_unwrap_test!(
+ name: frob,
+ test_body: {
+ use locks::RwLock;
- let r = Arc::new(RwLock::new(()));
+ const N: u32 = 10;
+ const M: usize = if cfg!(miri) { 100 } else { 1000 };
- let (tx, rx) = channel::<()>();
- for _ in 0..N {
- let tx = tx.clone();
- let r = r.clone();
- thread::spawn(move || {
- let mut rng = crate::common::test_rng();
- for _ in 0..M {
- if rng.random_bool(1.0 / (N as f64)) {
- drop(r.write().unwrap());
- } else {
- drop(r.read().unwrap());
+ let r = Arc::new(RwLock::new(()));
+
+ let (tx, rx) = channel::<()>();
+ for _ in 0..N {
+ let tx = tx.clone();
+ let r = r.clone();
+ thread::spawn(move || {
+ let mut rng = crate::common::test_rng();
+ for _ in 0..M {
+ if rng.random_bool(1.0 / (N as f64)) {
+ drop(maybe_unwrap(r.write()));
+ } else {
+ drop(maybe_unwrap(r.read()));
+ }
}
+ drop(tx);
+ });
+ }
+ drop(tx);
+ let _ = rx.recv();
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_rw_arc,
+ test_body: {
+ use locks::RwLock;
+
+ let arc = Arc::new(RwLock::new(0));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+
+ thread::spawn(move || {
+ let mut lock = maybe_unwrap(arc2.write());
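+ // Repeatedly write a -1 sentinel and yield before restoring the count; a reader that
+ // could sneak in mid-update would see the negative value and fail its `*lock >= 0` assert.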
+ for _ in 0..10 {
+ let tmp = *lock;
+ *lock = -1;
+ thread::yield_now();
+ *lock = tmp + 1;
}
- drop(tx);
+ tx.send(()).unwrap();
});
+
+ // Readers try to catch the writer in the act
+ let mut children = Vec::new();
+ for _ in 0..5 {
+ let arc3 = arc.clone();
+ children.push(thread::spawn(move || {
+ let lock = maybe_unwrap(arc3.read());
+ assert!(*lock >= 0);
+ }));
+ }
+
+ // Wait for children to pass their asserts
+ for r in children {
+ assert!(r.join().is_ok());
+ }
+
+ // Wait for writer to finish
+ rx.recv().unwrap();
+ let lock = maybe_unwrap(arc.read());
+ assert_eq!(*lock, 10);
}
- drop(tx);
- let _ = rx.recv();
-}
+);
-#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_wr() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let _lock = arc2.write().unwrap();
- panic!();
- })
- .join();
- assert!(arc.read().is_err());
-}
+nonpoison_and_poison_unwrap_test!(
+ name: test_rw_arc_access_in_unwind,
+ test_body: {
+ use locks::RwLock;
+
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<RwLock<isize>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ let mut lock = maybe_unwrap(self.i.write());
+ *lock += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = maybe_unwrap(arc.read());
+ assert_eq!(*lock, 2);
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_rwlock_unsized,
+ test_body: {
+ use locks::RwLock;
+
+ let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+ {
+ let b = &mut *maybe_unwrap(rw.write());
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*maybe_unwrap(rw.read()), comp);
+ }
+);
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_mapped_w_r() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let lock = arc2.write().unwrap();
- let _lock = RwLockWriteGuard::map(lock, |val| val);
- panic!();
- })
- .join();
- assert!(arc.read().is_err());
-}
+nonpoison_and_poison_unwrap_test!(
+ name: test_into_inner,
+ test_body: {
+ use locks::RwLock;
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_ww() {
- let arc = Arc::new(RwLock::new(1));
- assert!(!arc.is_poisoned());
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let _lock = arc2.write().unwrap();
- panic!();
- })
- .join();
- assert!(arc.write().is_err());
- assert!(arc.is_poisoned());
-}
+ let m = RwLock::new(NonCopy(10));
+ assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(10));
+ }
+);
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_poison_mapped_w_w() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let lock = arc2.write().unwrap();
- let _lock = RwLockWriteGuard::map(lock, |val| val);
- panic!();
- })
- .join();
- assert!(arc.write().is_err());
- assert!(arc.is_poisoned());
-}
+nonpoison_and_poison_unwrap_test!(
+ name: test_into_inner_drop,
+ test_body: {
+ use locks::RwLock;
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_rr() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let _lock = arc2.read().unwrap();
- panic!();
- })
- .join();
- let lock = arc.read().unwrap();
- assert_eq!(*lock, 1);
-}
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_mapped_r_r() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let lock = arc2.read().unwrap();
- let _lock = RwLockReadGuard::map(lock, |val| val);
- panic!();
- })
- .join();
- let lock = arc.read().unwrap();
- assert_eq!(*lock, 1);
-}
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = RwLock::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = maybe_unwrap(m.into_inner());
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+ }
+);
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_rw() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let _lock = arc2.read().unwrap();
- panic!()
- })
- .join();
- let lock = arc.write().unwrap();
- assert_eq!(*lock, 1);
-}
+nonpoison_and_poison_unwrap_test!(
+ name: test_get_cloned,
+ test_body: {
+ use locks::RwLock;
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_no_poison_mapped_r_w() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _: Result<(), _> = thread::spawn(move || {
- let lock = arc2.read().unwrap();
- let _lock = RwLockReadGuard::map(lock, |val| val);
- panic!();
- })
- .join();
- let lock = arc.write().unwrap();
- assert_eq!(*lock, 1);
-}
+ #[derive(Clone, Eq, PartialEq, Debug)]
+ struct Cloneable(i32);
-#[test]
-fn test_rw_arc() {
- let arc = Arc::new(RwLock::new(0));
- let arc2 = arc.clone();
- let (tx, rx) = channel();
-
- thread::spawn(move || {
- let mut lock = arc2.write().unwrap();
- for _ in 0..10 {
- let tmp = *lock;
- *lock = -1;
- thread::yield_now();
- *lock = tmp + 1;
+ let m = RwLock::new(Cloneable(10));
+
+ assert_eq!(maybe_unwrap(m.get_cloned()), Cloneable(10));
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_get_mut,
+ test_body: {
+ use locks::RwLock;
+
+ let mut m = RwLock::new(NonCopy(10));
+ *maybe_unwrap(m.get_mut()) = NonCopy(20);
+ assert_eq!(maybe_unwrap(m.into_inner()), NonCopy(20));
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_set,
+ test_body: {
+ use locks::RwLock;
+
+ fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+ where
+ T: Debug + Eq,
+ {
+ let m = RwLock::new(init());
+
+ assert_eq!(*maybe_unwrap(m.read()), init());
+ maybe_unwrap(m.set(value()));
+ assert_eq!(*maybe_unwrap(m.read()), value());
+ }
+
+ inner(|| NonCopy(10), || NonCopy(20));
+ inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_replace,
+ test_body: {
+ use locks::RwLock;
+
+ fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
+ where
+ T: Debug + Eq,
+ {
+ let m = RwLock::new(init());
+
+ assert_eq!(*maybe_unwrap(m.read()), init());
+ assert_eq!(maybe_unwrap(m.replace(value())), init());
+ assert_eq!(*maybe_unwrap(m.read()), value());
}
- tx.send(()).unwrap();
- });
- // Readers try to catch the writer in the act
- let mut children = Vec::new();
- for _ in 0..5 {
- let arc3 = arc.clone();
- children.push(thread::spawn(move || {
- let lock = arc3.read().unwrap();
- assert!(*lock >= 0);
- }));
+ inner(|| NonCopy(10), || NonCopy(20));
+ inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_read_guard_covariance,
+ test_body: {
+ use locks::{RwLock, RwLockReadGuard};
+
+ fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+ let j: i32 = 5;
+ let lock = RwLock::new(&j);
+ {
+ let i = 6;
+ do_stuff(maybe_unwrap(lock.read()), &i);
+ }
+ drop(lock);
}
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_mapped_read_guard_covariance,
+ test_body: {
+ use locks::{RwLock, RwLockReadGuard, MappedRwLockReadGuard};
+
+ fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
+ let j: i32 = 5;
+ let lock = RwLock::new((&j, &j));
+ {
+ let i = 6;
+ let guard = maybe_unwrap(lock.read());
+ let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
+ do_stuff(guard, &i);
+ }
+ drop(lock);
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_downgrade_basic,
+ test_body: {
+ use locks::{RwLock, RwLockWriteGuard};
+
+ let r = RwLock::new(());
- // Wait for children to pass their asserts
- for r in children {
- assert!(r.join().is_ok());
+ let write_guard = maybe_unwrap(r.write());
+ let _read_guard = RwLockWriteGuard::downgrade(write_guard);
}
+);
- // Wait for writer to finish
- rx.recv().unwrap();
- let lock = arc.read().unwrap();
- assert_eq!(*lock, 10);
-}
+// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
+// See for details.
+#[cfg_attr(all(miri, target_os = "macos"), ignore)]
+nonpoison_and_poison_unwrap_test!(
+ name: test_downgrade_observe,
+ test_body: {
+ use locks::{RwLock, RwLockWriteGuard};
+
+ // Inspired by the test `test_rwlock_downgrade` from:
+ // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
+
+ const W: usize = 20;
+ const N: usize = if cfg!(miri) { 40 } else { 100 };
+
+ // This test spawns `W` writer threads, where each will increment a counter `N` times,
+ // ensuring that the value they wrote has not changed after downgrading.
+
+ let rw = Arc::new(RwLock::new(0));
+
+ // Spawn the writers that will do `W * N` operations and checks.
+ let handles: Vec<_> = (0..W)
+ .map(|_| {
+ let rw = rw.clone();
+ thread::spawn(move || {
+ for _ in 0..N {
+ // Increment the counter.
+ let mut write_guard = maybe_unwrap(rw.write());
+ *write_guard += 1;
+ let cur_val = *write_guard;
+
+ // Downgrade the lock to read mode, where the value protected cannot be
+ // modified.
+ let read_guard = RwLockWriteGuard::downgrade(write_guard);
+ assert_eq!(cur_val, *read_guard);
+ }
+ })
+ })
+ .collect();
-#[test]
-#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
-fn test_rw_arc_access_in_unwind() {
- let arc = Arc::new(RwLock::new(1));
- let arc2 = arc.clone();
- let _ = thread::spawn(move || -> () {
- struct Unwinder {
- i: Arc<RwLock<isize>>,
+ for handle in handles {
+ handle.join().unwrap();
}
- impl Drop for Unwinder {
- fn drop(&mut self) {
- let mut lock = self.i.write().unwrap();
- *lock += 1;
- }
+
+ assert_eq!(*maybe_unwrap(rw.read()), W * N);
+ }
+);
+
+// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
+// See for details.
+#[cfg_attr(all(miri, target_os = "macos"), ignore)]
+nonpoison_and_poison_unwrap_test!(
+ name: test_downgrade_atomic,
+ test_body: {
+ use locks::{RwLock, RwLockWriteGuard};
+
+ const NEW_VALUE: i32 = -1;
+
+ // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
+ // downgraded, the lock must be in read mode and no other threads can take the write lock to
+ // modify the protected value.
+
+ // `W` is the number of evil writer threads.
+ const W: usize = 20;
+ let rwlock = Arc::new(RwLock::new(0));
+
+ // Spawns many evil writer threads that will try and write to the locked value before the
+ // initial writer (who has the exclusive lock) can read after it downgrades.
+ // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
+ // itself as no other thread should be able to mutate the protected value.
+
+ // Put the lock in write mode, causing all future threads trying to access this go to sleep.
+ let mut main_write_guard = maybe_unwrap(rwlock.write());
+
+ // Spawn all of the evil writer threads. They will each increment the protected value by 1.
+ let handles: Vec<_> = (0..W)
+ .map(|_| {
+ let rwlock = rwlock.clone();
+ thread::spawn(move || {
+ // Will go to sleep since the main thread initially has the write lock.
+ let mut evil_guard = maybe_unwrap(rwlock.write());
+ *evil_guard += 1;
+ })
+ })
+ .collect();
+
+ // Wait for a good amount of time so that evil threads go to sleep.
+ // Note: this is not strictly necessary...
+ let eternity = std::time::Duration::from_millis(42);
+ thread::sleep(eternity);
+
+ // Once everyone is asleep, set the value to `NEW_VALUE`.
+ *main_write_guard = NEW_VALUE;
+
+ // Atomically downgrade the write guard into a read guard.
+ let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+
+ // If the above is not atomic, then it would be possible for an evil thread to get in front
+ // of this read and change the value to be non-negative.
+ assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
+
+ // Drop the main read guard and allow the evil writer threads to start incrementing.
+ drop(main_read_guard);
+
+ for handle in handles {
+ handle.join().unwrap();
}
- let _u = Unwinder { i: arc2 };
- panic!();
- })
- .join();
- let lock = arc.read().unwrap();
- assert_eq!(*lock, 2);
-}
+
+ let final_check = maybe_unwrap(rwlock.read());
+ assert_eq!(*final_check, W as i32 + NEW_VALUE);
+ }
+);
+
+nonpoison_and_poison_unwrap_test!(
+ name: test_mapping_mapped_guard,
+ test_body: {
+ use locks::{
+ RwLock, RwLockReadGuard, RwLockWriteGuard, MappedRwLockReadGuard, MappedRwLockWriteGuard
+ };
+
+ let arr = [0; 4];
+ let mut lock = RwLock::new(arr);
+ let guard = maybe_unwrap(lock.write());
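+ // Narrow the view twice: first to the front two elements, then past the first of those,
+ // leaving a single-element guard over `arr[1]`.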
+ let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
+ let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
+ assert_eq!(guard.len(), 1);
+ guard[0] = 42;
+ drop(guard);
+ assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+
+ let guard = maybe_unwrap(lock.read());
+ let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
+ let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
+ assert_eq!(*guard, [42]);
+ drop(guard);
+ assert_eq!(*maybe_unwrap(lock.get_mut()), [0, 42, 0, 0]);
+ }
+);
#[test]
-fn test_rwlock_unsized() {
- let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
- {
- let b = &mut *rw.write().unwrap();
- b[0] = 4;
- b[2] = 5;
+fn nonpoison_test_rwlock_try_write() {
+ use std::sync::nonpoison::{RwLock, RwLockReadGuard, WouldBlock};
+
+ let lock = RwLock::new(0isize);
+ let read_guard = lock.read();
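+ // While a shared read guard is alive, an exclusive `try_write` must fail with `WouldBlock`.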
+
+ let write_result = lock.try_write();
+ match write_result {
+ Err(WouldBlock) => (),
+ Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+ }
+
+ drop(read_guard);
+ let mapped_read_guard = RwLockReadGuard::map(lock.read(), |_| &());
+
+ let write_result = lock.try_write();
+ match write_result {
+ Err(WouldBlock) => (),
+ Ok(_) => assert!(false, "try_write should not succeed while mapped_read_guard is in scope"),
}
- let comp: &[i32] = &[4, 2, 5];
- assert_eq!(&*rw.read().unwrap(), comp);
+
+ drop(mapped_read_guard);
}
#[test]
-fn test_rwlock_try_write() {
+fn poison_test_rwlock_try_write() {
+ use std::sync::poison::{RwLock, RwLockReadGuard, TryLockError};
+
let lock = RwLock::new(0isize);
let read_guard = lock.read().unwrap();
@@ -285,6 +503,11 @@ fn test_rwlock_try_write() {
drop(mapped_read_guard);
}
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Poison Tests
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/// Creates an `RwLock` that is immediately poisoned.
fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> {
let lock = RwLock::new(value);
@@ -300,30 +523,6 @@ fn new_poisoned_rwlock<T>(value: T) -> RwLock<T> {
lock
}
-#[test]
-fn test_into_inner() {
- let m = RwLock::new(NonCopy(10));
- assert_eq!(m.into_inner().unwrap(), NonCopy(10));
-}
-
-#[test]
-fn test_into_inner_drop() {
- struct Foo(Arc<AtomicUsize>);
- impl Drop for Foo {
- fn drop(&mut self) {
- self.0.fetch_add(1, Ordering::SeqCst);
- }
- }
- let num_drops = Arc::new(AtomicUsize::new(0));
- let m = RwLock::new(Foo(num_drops.clone()));
- assert_eq!(num_drops.load(Ordering::SeqCst), 0);
- {
- let _inner = m.into_inner().unwrap();
- assert_eq!(num_drops.load(Ordering::SeqCst), 0);
- }
- assert_eq!(num_drops.load(Ordering::SeqCst), 1);
-}
-
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_into_inner_poison() {
@@ -335,16 +534,12 @@ fn test_into_inner_poison() {
}
}
-#[test]
-fn test_get_cloned() {
- let m = RwLock::new(Cloneable(10));
-
- assert_eq!(m.get_cloned().unwrap(), Cloneable(10));
-}
-
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_get_cloned_poison() {
+ #[derive(Clone, Eq, PartialEq, Debug)]
+ struct Cloneable(i32);
+
let m = new_poisoned_rwlock(Cloneable(10));
match m.get_cloned() {
@@ -353,13 +548,6 @@ fn test_get_cloned_poison() {
}
}
-#[test]
-fn test_get_mut() {
- let mut m = RwLock::new(NonCopy(10));
- *m.get_mut().unwrap() = NonCopy(20);
- assert_eq!(m.into_inner().unwrap(), NonCopy(20));
-}
-
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_get_mut_poison() {
@@ -371,23 +559,6 @@ fn test_get_mut_poison() {
}
}
-#[test]
-fn test_set() {
- fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
- where
- T: Debug + Eq,
- {
- let m = RwLock::new(init());
-
- assert_eq!(*m.read().unwrap(), init());
- m.set(value()).unwrap();
- assert_eq!(*m.read().unwrap(), value());
- }
-
- inner(|| NonCopy(10), || NonCopy(20));
- inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_set_poison() {
@@ -410,23 +581,6 @@ fn test_set_poison() {
inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
}
-#[test]
-fn test_replace() {
- fn inner<T>(mut init: impl FnMut() -> T, mut value: impl FnMut() -> T)
- where
- T: Debug + Eq,
- {
- let m = RwLock::new(init());
-
- assert_eq!(*m.read().unwrap(), init());
- assert_eq!(m.replace(value()).unwrap(), init());
- assert_eq!(*m.read().unwrap(), value());
- }
-
- inner(|| NonCopy(10), || NonCopy(20));
- inner(|| NonCopyNeedsDrop(10), || NonCopyNeedsDrop(20));
-}
-
#[test]
#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
fn test_replace_poison() {
@@ -450,49 +604,118 @@ fn test_replace_poison() {
}
#[test]
-fn test_read_guard_covariance() {
- fn do_stuff<'a>(_: RwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
- let j: i32 = 5;
- let lock = RwLock::new(&j);
- {
- let i = 6;
- do_stuff(lock.read().unwrap(), &i);
- }
- drop(lock);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_wr() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.write().unwrap();
+ panic!();
+ })
+ .join();
+ assert!(arc.read().is_err());
}
#[test]
-fn test_mapped_read_guard_covariance() {
- fn do_stuff<'a>(_: MappedRwLockReadGuard<'_, &'a i32>, _: &'a i32) {}
- let j: i32 = 5;
- let lock = RwLock::new((&j, &j));
- {
- let i = 6;
- let guard = lock.read().unwrap();
- let guard = RwLockReadGuard::map(guard, |(val, _val)| val);
- do_stuff(guard, &i);
- }
- drop(lock);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_mapped_w_r() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let lock = arc2.write().unwrap();
+ let _lock = RwLockWriteGuard::map(lock, |val| val);
+ panic!();
+ })
+ .join();
+ assert!(arc.read().is_err());
}
#[test]
-fn test_mapping_mapped_guard() {
- let arr = [0; 4];
- let mut lock = RwLock::new(arr);
- let guard = lock.write().unwrap();
- let guard = RwLockWriteGuard::map(guard, |arr| &mut arr[..2]);
- let mut guard = MappedRwLockWriteGuard::map(guard, |slice| &mut slice[1..]);
- assert_eq!(guard.len(), 1);
- guard[0] = 42;
- drop(guard);
- assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
-
- let guard = lock.read().unwrap();
- let guard = RwLockReadGuard::map(guard, |arr| &arr[..2]);
- let guard = MappedRwLockReadGuard::map(guard, |slice| &slice[1..]);
- assert_eq!(*guard, [42]);
- drop(guard);
- assert_eq!(*lock.get_mut().unwrap(), [0, 42, 0, 0]);
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_ww() {
+ let arc = Arc::new(RwLock::new(1));
+ assert!(!arc.is_poisoned());
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.write().unwrap();
+ panic!();
+ })
+ .join();
+ assert!(arc.write().is_err());
+ assert!(arc.is_poisoned());
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_poison_mapped_w_w() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let lock = arc2.write().unwrap();
+ let _lock = RwLockWriteGuard::map(lock, |val| val);
+ panic!();
+ })
+ .join();
+ assert!(arc.write().is_err());
+ assert!(arc.is_poisoned());
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_rr() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.read().unwrap();
+ panic!();
+ })
+ .join();
+ let lock = arc.read().unwrap();
+ assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_mapped_r_r() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let lock = arc2.read().unwrap();
+ let _lock = RwLockReadGuard::map(lock, |val| val);
+ panic!();
+ })
+ .join();
+ let lock = arc.read().unwrap();
+ assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_rw() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let _lock = arc2.read().unwrap();
+ panic!()
+ })
+ .join();
+ let lock = arc.write().unwrap();
+ assert_eq!(*lock, 1);
+}
+
+#[test]
+#[cfg_attr(not(panic = "unwind"), ignore = "test requires unwinding support")]
+fn test_rw_arc_no_poison_mapped_r_w() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _: Result<(), _> = thread::spawn(move || {
+ let lock = arc2.read().unwrap();
+ let _lock = RwLockReadGuard::map(lock, |val| val);
+ panic!();
+ })
+ .join();
+ let lock = arc.write().unwrap();
+ assert_eq!(*lock, 1);
}
#[test]
@@ -638,114 +861,3 @@ fn panic_while_mapping_write_unlocked_poison() {
drop(lock);
}
-
-#[test]
-fn test_downgrade_basic() {
- let r = RwLock::new(());
-
- let write_guard = r.write().unwrap();
- let _read_guard = RwLockWriteGuard::downgrade(write_guard);
-}
-
-#[test]
-// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
-// See for details.
-#[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_observe() {
- // Taken from the test `test_rwlock_downgrade` from:
- // https://github.com/Amanieu/parking_lot/blob/master/src/rwlock.rs
-
- const W: usize = 20;
- const N: usize = if cfg!(miri) { 40 } else { 100 };
-
- // This test spawns `W` writer threads, where each will increment a counter `N` times, ensuring
- // that the value they wrote has not changed after downgrading.
-
- let rw = Arc::new(RwLock::new(0));
-
- // Spawn the writers that will do `W * N` operations and checks.
- let handles: Vec<_> = (0..W)
- .map(|_| {
- let rw = rw.clone();
- thread::spawn(move || {
- for _ in 0..N {
- // Increment the counter.
- let mut write_guard = rw.write().unwrap();
- *write_guard += 1;
- let cur_val = *write_guard;
-
- // Downgrade the lock to read mode, where the value protected cannot be modified.
- let read_guard = RwLockWriteGuard::downgrade(write_guard);
- assert_eq!(cur_val, *read_guard);
- }
- })
- })
- .collect();
-
- for handle in handles {
- handle.join().unwrap();
- }
-
- assert_eq!(*rw.read().unwrap(), W * N);
-}
-
-#[test]
-// FIXME: On macOS we use a provenance-incorrect implementation and Miri catches that issue.
-// See for details.
-#[cfg_attr(all(miri, target_os = "macos"), ignore)]
-fn test_downgrade_atomic() {
- const NEW_VALUE: i32 = -1;
-
- // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
- // downgraded, the lock must be in read mode and no other threads can take the write lock to
- // modify the protected value.
-
- // `W` is the number of evil writer threads.
- const W: usize = 20;
- let rwlock = Arc::new(RwLock::new(0));
-
- // Spawns many evil writer threads that will try and write to the locked value before the
- // initial writer (who has the exclusive lock) can read after it downgrades.
- // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
- // itself as no other thread should be able to mutate the protected value.
-
- // Put the lock in write mode, causing all future threads trying to access this go to sleep.
- let mut main_write_guard = rwlock.write().unwrap();
-
- // Spawn all of the evil writer threads. They will each increment the protected value by 1.
- let handles: Vec<_> = (0..W)
- .map(|_| {
- let rwlock = rwlock.clone();
- thread::spawn(move || {
- // Will go to sleep since the main thread initially has the write lock.
- let mut evil_guard = rwlock.write().unwrap();
- *evil_guard += 1;
- })
- })
- .collect();
-
- // Wait for a good amount of time so that evil threads go to sleep.
- // Note: this is not strictly necessary...
- let eternity = std::time::Duration::from_millis(42);
- thread::sleep(eternity);
-
- // Once everyone is asleep, set the value to `NEW_VALUE`.
- *main_write_guard = NEW_VALUE;
-
- // Atomically downgrade the write guard into a read guard.
- let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
-
- // If the above is not atomic, then it would be possible for an evil thread to get in front of
- // this read and change the value to be non-negative.
- assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
-
- // Drop the main read guard and allow the evil writer threads to start incrementing.
- drop(main_read_guard);
-
- for handle in handles {
- handle.join().unwrap();
- }
-
- let final_check = rwlock.read().unwrap();
- assert_eq!(*final_check, W as i32 + NEW_VALUE);
-}
diff --git a/library/std_detect/Cargo.toml b/library/std_detect/Cargo.toml
index 33e6617c38147..2739bb5923009 100644
--- a/library/std_detect/Cargo.toml
+++ b/library/std_detect/Cargo.toml
@@ -21,8 +21,8 @@ is-it-maintained-open-issues = { repository = "rust-lang/stdarch" }
maintenance = { status = "experimental" }
[dependencies]
-core = { path = "../core" }
-alloc = { path = "../alloc" }
+core = { version = "1.0.0", package = 'rustc-std-workspace-core' }
+alloc = { version = "1.0.0", package = 'rustc-std-workspace-alloc' }
[target.'cfg(not(windows))'.dependencies]
libc = { version = "0.2.0", optional = true, default-features = false }
diff --git a/library/std_detect/src/detect/arch/riscv.rs b/library/std_detect/src/detect/arch/riscv.rs
index 1d21b1d485589..1e57d09edb143 100644
--- a/library/std_detect/src/detect/arch/riscv.rs
+++ b/library/std_detect/src/detect/arch/riscv.rs
@@ -37,90 +37,121 @@ features! {
///
/// # Unprivileged Specification
///
- /// The supported ratified RISC-V instruction sets are as follows:
+ /// The supported ratified RISC-V instruction sets are as follows (the OS
+ /// columns denote runtime feature detection support, together with the
+ /// minimum supported kernel version where one applies):
///
- /// * RV32E: `"rv32e"`
- /// * RV32I: `"rv32i"`
- /// * RV64I: `"rv64i"`
- /// * A: `"a"`
- /// * Zaamo: `"zaamo"`
- /// * Zalrsc: `"zalrsc"`
- /// * B: `"b"`
- /// * Zba: `"zba"`
- /// * Zbb: `"zbb"`
- /// * Zbs: `"zbs"`
- /// * C: `"c"`
- /// * Zca: `"zca"`
- /// * Zcd: `"zcd"` (if D is enabled)
- /// * Zcf: `"zcf"` (if F is enabled on RV32)
- /// * D: `"d"`
- /// * F: `"f"`
- /// * M: `"m"`
- /// * Q: `"q"`
- /// * V: `"v"`
- /// * Zve32x: `"zve32x"`
- /// * Zve32f: `"zve32f"`
- /// * Zve64x: `"zve64x"`
- /// * Zve64f: `"zve64f"`
- /// * Zve64d: `"zve64d"`
- /// * Zicbom: `"zicbom"`
- /// * Zicboz: `"zicboz"`
- /// * Zicntr: `"zicntr"`
- /// * Zicond: `"zicond"`
- /// * Zicsr: `"zicsr"`
- /// * Zifencei: `"zifencei"`
- /// * Zihintntl: `"zihintntl"`
- /// * Zihintpause: `"zihintpause"`
- /// * Zihpm: `"zihpm"`
- /// * Zimop: `"zimop"`
- /// * Zabha: `"zabha"`
- /// * Zacas: `"zacas"`
- /// * Zawrs: `"zawrs"`
- /// * Zfa: `"zfa"`
- /// * Zfbfmin: `"zfbfmin"`
- /// * Zfh: `"zfh"`
- /// * Zfhmin: `"zfhmin"`
- /// * Zfinx: `"zfinx"`
- /// * Zdinx: `"zdinx"`
- /// * Zhinx: `"zhinx"`
- /// * Zhinxmin: `"zhinxmin"`
- /// * Zcb: `"zcb"`
- /// * Zcmop: `"zcmop"`
- /// * Zbc: `"zbc"`
- /// * Zbkb: `"zbkb"`
- /// * Zbkc: `"zbkc"`
- /// * Zbkx: `"zbkx"`
- /// * Zk: `"zk"`
- /// * Zkn: `"zkn"`
- /// * Zknd: `"zknd"`
- /// * Zkne: `"zkne"`
- /// * Zknh: `"zknh"`
- /// * Zkr: `"zkr"`
- /// * Zks: `"zks"`
- /// * Zksed: `"zksed"`
- /// * Zksh: `"zksh"`
- /// * Zkt: `"zkt"`
- /// * Zvbb: `"zvbb"`
- /// * Zvbc: `"zvbc"`
- /// * Zvfbfmin: `"zvfbfmin"`
- /// * Zvfbfwma: `"zvfbfwma"`
- /// * Zvfh: `"zvfh"`
- /// * Zvfhmin: `"zvfhmin"`
- /// * Zvkb: `"zvkb"`
- /// * Zvkg: `"zvkg"`
- /// * Zvkn: `"zvkn"`
- /// * Zvkned: `"zvkned"`
- /// * Zvknha: `"zvknha"`
- /// * Zvknhb: `"zvknhb"`
- /// * Zvknc: `"zvknc"`
- /// * Zvkng: `"zvkng"`
- /// * Zvks: `"zvks"`
- /// * Zvksed: `"zvksed"`
- /// * Zvksh: `"zvksh"`
- /// * Zvksc: `"zvksc"`
- /// * Zvksg: `"zvksg"`
- /// * Zvkt: `"zvkt"`
- /// * Ztso: `"ztso"`
+ /// | Literal | Base | Linux |
+ /// |:---------- |:------- |:---------- |
+ /// | `"rv32e"` | RV32E | No |
+ /// | `"rv32i"` | RV32I | Yes [^ima] |
+ /// | `"rv64i"` | RV64I | Yes [^ima] |
+ ///
+ /// | Literal | Extension | Linux |
+ /// |:--------------- |:----------- |:------------------- |
+ /// | `"a"` | A | Yes [^ima] |
+ /// | `"b"` | B | 6.5 |
+ /// | `"c"` | C | Yes |
+ /// | `"d"` | D | Yes |
+ /// | `"f"` | F | Yes |
+ /// | `"m"` | M | Yes [^ima] |
+ /// | `"q"` | Q | No |
+ /// | `"v"` | V | 6.5 |
+ /// | `"zaamo"` | Zaamo | 6.15 [^ima] [^dep] |
+ /// | `"zabha"` | Zabha | 6.16 |
+ /// | `"zacas"` | Zacas | 6.8 |
+ /// | `"zalrsc"` | Zalrsc | 6.15 [^ima] [^dep] |
+ /// | `"zawrs"` | Zawrs | 6.11 |
+ /// | `"zba"` | Zba | 6.5 |
+ /// | `"zbb"` | Zbb | 6.5 |
+ /// | `"zbc"` | Zbc | 6.8 |
+ /// | `"zbkb"` | Zbkb | 6.8 |
+ /// | `"zbkc"` | Zbkc | 6.8 |
+ /// | `"zbkx"` | Zbkx | 6.8 |
+ /// | `"zbs"` | Zbs | 6.5 |
+ /// | `"zca"` | Zca | 6.11 [^dep] |
+ /// | `"zcb"` | Zcb | 6.11 |
+ /// | `"zcd"` | Zcd | 6.11 [^dep] |
+ /// | `"zcf"` | Zcf | 6.11 [^dep] |
+ /// | `"zcmop"` | Zcmop | 6.11 |
+ /// | `"zdinx"` | Zdinx | No |
+ /// | `"zfa"` | Zfa | 6.8 |
+ /// | `"zfbfmin"` | Zfbfmin | 6.15 |
+ /// | `"zfh"` | Zfh | 6.8 |
+ /// | `"zfhmin"` | Zfhmin | 6.8 |
+ /// | `"zfinx"` | Zfinx | No |
+ /// | `"zhinx"` | Zhinx | No |
+ /// | `"zhinxmin"` | Zhinxmin | No |
+ /// | `"zicbom"` | Zicbom | 6.15 |
+ /// | `"zicboz"` | Zicboz | 6.7 |
+ /// | `"zicntr"` | Zicntr | 6.15 [^ima] [^cntr] |
+ /// | `"zicond"` | Zicond | 6.8 |
+ /// | `"zicsr"` | Zicsr | No [^ima] [^dep] |
+ /// | `"zifencei"` | Zifencei | No [^ima] |
+ /// | `"zihintntl"` | Zihintntl | 6.8 |
+ /// | `"zihintpause"` | Zihintpause | 6.10 |
+ /// | `"zihpm"` | Zihpm | 6.15 [^cntr] |
+ /// | `"zimop"` | Zimop | 6.11 |
+ /// | `"zk"` | Zk | No [^zkr] |
+ /// | `"zkn"` | Zkn | 6.8 |
+ /// | `"zknd"` | Zknd | 6.8 |
+ /// | `"zkne"` | Zkne | 6.8 |
+ /// | `"zknh"` | Zknh | 6.8 |
+ /// | `"zkr"` | Zkr | No [^zkr] |
+ /// | `"zks"` | Zks | 6.8 |
+ /// | `"zksed"` | Zksed | 6.8 |
+ /// | `"zksh"` | Zksh | 6.8 |
+ /// | `"zkt"` | Zkt | 6.8 |
+ /// | `"ztso"` | Ztso | 6.8 |
+ /// | `"zvbb"` | Zvbb | 6.8 |
+ /// | `"zvbc"` | Zvbc | 6.8 |
+ /// | `"zve32f"` | Zve32f | 6.11 [^dep] |
+ /// | `"zve32x"` | Zve32x | 6.11 [^dep] |
+ /// | `"zve64d"` | Zve64d | 6.11 [^dep] |
+ /// | `"zve64f"` | Zve64f | 6.11 [^dep] |
+ /// | `"zve64x"` | Zve64x | 6.11 [^dep] |
+ /// | `"zvfbfmin"` | Zvfbfmin | 6.15 |
+ /// | `"zvfbfwma"` | Zvfbfwma | 6.15 |
+ /// | `"zvfh"` | Zvfh | 6.8 |
+ /// | `"zvfhmin"` | Zvfhmin | 6.8 |
+ /// | `"zvkb"` | Zvkb | 6.8 |
+ /// | `"zvkg"` | Zvkg | 6.8 |
+ /// | `"zvkn"` | Zvkn | 6.8 |
+ /// | `"zvknc"` | Zvknc | 6.8 |
+ /// | `"zvkned"` | Zvkned | 6.8 |
+ /// | `"zvkng"` | Zvkng | 6.8 |
+ /// | `"zvknha"` | Zvknha | 6.8 |
+ /// | `"zvknhb"` | Zvknhb | 6.8 |
+ /// | `"zvks"` | Zvks | 6.8 |
+ /// | `"zvksc"` | Zvksc | 6.8 |
+ /// | `"zvksed"` | Zvksed | 6.8 |
+ /// | `"zvksg"` | Zvksg | 6.8 |
+ /// | `"zvksh"` | Zvksh | 6.8 |
+ /// | `"zvkt"` | Zvkt | 6.8 |
+ ///
+ /// [^ima]: Or enabled when the IMA base behavior is detected on Linux
+ /// kernel version 6.4 or later (for the bases, only the matching one --
+ /// either `"rv32i"` or `"rv64i"` -- is enabled).
+ ///
+ /// [^cntr]: Even if this extension is available, that does not necessarily
+ /// mean all performance counters are accessible.
+ /// For example, accesses to all performance counters except `time`
+ /// (wall-clock) are blocked by default on Linux kernel version 6.6 or
+ /// later.
+ /// Also beware that, even if performance counters like `cycle` and
+ /// `instret` are accessible, their values can be unreliable (e.g.
+ /// returning a constant value) under certain circumstances.
+ ///
+ /// [^dep]: Or enabled as a dependency of another extension (a superset)
+ /// even if runtime detection of this feature itself is not supported (as
+ /// long as runtime detection of the superset is supported).
+ ///
+ /// [^zkr]: Linux does not report the existence of this extension, even if
+ /// it is supported by the hardware, mainly because the `seed` CSR of the
+ /// Zkr extension (which provides hardware-based randomness) is normally
+ /// inaccessible from user mode.
+ /// For Zk extension features other than this CSR, check for both the
+ /// `"zkn"` and `"zkt"` features instead.
///
/// There's also bases and extensions marked as standard instruction set,
/// but they are in frozen or draft state. These instruction sets are also
diff --git a/library/std_detect/src/detect/arch/x86.rs b/library/std_detect/src/detect/arch/x86.rs
index 28b3e3cfb35b7..bd749b88f566d 100644
--- a/library/std_detect/src/detect/arch/x86.rs
+++ b/library/std_detect/src/detect/arch/x86.rs
@@ -233,6 +233,12 @@ features! {
/// AMX-TF32 (TensorFloat32 Operations)
@FEATURE: #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] amx_transpose: "amx-transpose";
/// AMX-TRANSPOSE (Matrix Transpose Operations)
+ @FEATURE: #[unstable(feature = "apx_target_feature", issue = "139284")] apxf: "apxf";
+ /// APX-F (Advanced Performance Extensions - Foundation)
+ @FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_1: "avx10.1";
+ /// AVX10.1
+ @FEATURE: #[unstable(feature = "avx10_target_feature", issue = "138843")] avx10_2: "avx10.2";
+ /// AVX10.2
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] f16c: "f16c";
/// F16C (Conversions between IEEE-754 `binary16` and `binary32` formats)
@FEATURE: #[stable(feature = "simd_x86", since = "1.27.0")] fma: "fma";
diff --git a/library/std_detect/src/detect/os/riscv.rs b/library/std_detect/src/detect/os/riscv.rs
index c6acbd3525bd3..9b9e0cba09d1c 100644
--- a/library/std_detect/src/detect/os/riscv.rs
+++ b/library/std_detect/src/detect/os/riscv.rs
@@ -119,11 +119,31 @@ pub(crate) fn imply_features(mut value: cache::Initializer) -> cache::Initialize
imply!(d | zfhmin | zfa => f);
imply!(zfbfmin => f); // and some of (not all) "Zfh" instructions.
- // Relatively complex implication rules from the "C" extension.
+ // Relatively complex implication rules around the "C" extension.
+ // (from "C" and some others)
imply!(c => zca);
imply!(c & d => zcd);
#[cfg(target_arch = "riscv32")]
imply!(c & f => zcf);
+ // (to "C"; defined as superset)
+ cfg_select! {
+ target_arch = "riscv32" => {
+ if value.test(Feature::d as u32) {
+ imply!(zcf & zcd => c);
+ } else if value.test(Feature::f as u32) {
+ imply!(zcf => c);
+ } else {
+ imply!(zca => c);
+ }
+ }
+ _ => {
+ if value.test(Feature::d as u32) {
+ imply!(zcd => c);
+ } else {
+ imply!(zca => c);
+ }
+ }
+ }
imply!(zicntr | zihpm | f | zfinx | zve32x => zicsr);
diff --git a/library/std_detect/src/detect/os/x86.rs b/library/std_detect/src/detect/os/x86.rs
index 20f848ab05caf..cf11d8333127f 100644
--- a/library/std_detect/src/detect/os/x86.rs
+++ b/library/std_detect/src/detect/os/x86.rs
@@ -137,6 +137,32 @@ pub(crate) fn detect_features() -> cache::Initializer {
enable(ebx, 2, Feature::widekl);
}
+ // This detects ABM on AMD CPUs and LZCNT on Intel CPUs.
+ // On Intel CPUs with popcnt, lzcnt implements the
+ // "missing part" of ABM, so we map both to the same
+ // internal feature.
+ //
+ // The `is_x86_feature_detected!("lzcnt")` macro then
+ // internally maps to Feature::abm.
+ enable(extended_proc_info_ecx, 5, Feature::lzcnt);
+
+ // Hygon Dhyana originates from AMD technology and shares most of the architecture with
+ // AMD's family 17h, but has a different CPU vendor ID ("HygonGenuine") and family series
+ // number (family 18h).
+ //
+ // For CPUID feature bits, Hygon Dhyana (family 18h) shares the same definitions as AMD
+ // family 17h.
+ //
+ // The relevant AMD CPUID specification is https://www.amd.com/system/files/TechDocs/25481.pdf.
+ // The related Hygon kernel patch can be found at
+ // http://lkml.kernel.org/r/5ce86123a7b9dad925ac583d88d2f921040e859b.1538583282.git.puwen@hygon.cn
+ if vendor_id == *b"AuthenticAMD" || vendor_id == *b"HygonGenuine" {
+ // These features are available on AMD arch CPUs:
+ enable(extended_proc_info_ecx, 6, Feature::sse4a);
+ enable(extended_proc_info_ecx, 21, Feature::tbm);
+ enable(extended_proc_info_ecx, 11, Feature::xop);
+ }
+
// `XSAVE` and `AVX` support:
let cpu_xsave = bit::test(proc_info_ecx as usize, 26);
if cpu_xsave {
@@ -161,6 +187,7 @@ pub(crate) fn detect_features() -> cache::Initializer {
// * AVX -> `XCR0.AVX[2]`
// * AVX-512 -> `XCR0.AVX-512[7:5]`.
// * AMX -> `XCR0.AMX[18:17]`
+ // * APX -> `XCR0.APX[19]`
//
// by setting the corresponding bits of `XCR0` to `1`.
//
@@ -173,6 +200,8 @@ pub(crate) fn detect_features() -> cache::Initializer {
let os_avx512_support = xcr0 & 0xe0 == 0xe0;
// Test `XCR0.AMX[18:17]` with the mask `0b110_0000_0000_0000_0000 == 0x60000`
let os_amx_support = xcr0 & 0x60000 == 0x60000;
+ // Test `XCR0.APX[19]` with the mask `0b1000_0000_0000_0000_0000 == 0x80000`
+ let os_apx_support = xcr0 & 0x80000 == 0x80000;
// Only if the OS and the CPU support saving/restoring the AVX
// registers we enable `xsave` support:
@@ -262,33 +291,20 @@ pub(crate) fn detect_features() -> cache::Initializer {
enable(amx_feature_flags_eax, 8, Feature::amx_movrs);
}
}
- }
- }
- // This detects ABM on AMD CPUs and LZCNT on Intel CPUs.
- // On intel CPUs with popcnt, lzcnt implements the
- // "missing part" of ABM, so we map both to the same
- // internal feature.
- //
- // The `is_x86_feature_detected!("lzcnt")` macro then
- // internally maps to Feature::abm.
- enable(extended_proc_info_ecx, 5, Feature::lzcnt);
+ if os_apx_support {
+ enable(extended_features_edx_leaf_1, 21, Feature::apxf);
+ }
- // As Hygon Dhyana originates from AMD technology and shares most of the architecture with
- // AMD's family 17h, but with different CPU Vendor ID("HygonGenuine")/Family series
- // number(Family 18h).
- //
- // For CPUID feature bits, Hygon Dhyana(family 18h) share the same definition with AMD
- // family 17h.
- //
- // Related AMD CPUID specification is https://www.amd.com/system/files/TechDocs/25481.pdf.
- // Related Hygon kernel patch can be found on
- // http://lkml.kernel.org/r/5ce86123a7b9dad925ac583d88d2f921040e859b.1538583282.git.puwen@hygon.cn
- if vendor_id == *b"AuthenticAMD" || vendor_id == *b"HygonGenuine" {
- // These features are available on AMD arch CPUs:
- enable(extended_proc_info_ecx, 6, Feature::sse4a);
- enable(extended_proc_info_ecx, 21, Feature::tbm);
- enable(extended_proc_info_ecx, 11, Feature::xop);
+ let avx10_1 = enable(extended_features_edx_leaf_1, 19, Feature::avx10_1);
+ if avx10_1 {
+ let CpuidResult { ebx, .. } = unsafe { __cpuid(0x24) };
+ let avx10_version = ebx & 0xff;
+ if avx10_version >= 2 {
+ value.set(Feature::avx10_2 as u32);
+ }
+ }
+ }
}
}
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index fd430df9ff933..503fd156e16d3 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -2,5 +2,5 @@
# standard library we currently track.
[toolchain]
-channel = "nightly-2025-08-20"
+channel = "nightly-2025-08-26"
components = ["llvm-tools-preview", "rustc-dev", "rust-src", "rustfmt"]
diff --git a/tool_config/kani-version.toml b/tool_config/kani-version.toml
index 5c8d81d7cc7bd..370f3461c064c 100644
--- a/tool_config/kani-version.toml
+++ b/tool_config/kani-version.toml
@@ -2,4 +2,4 @@
# incompatible with the verify-std repo.
[kani]
-commit = "a7d917d31772cc56e5f78c87a3876cae003b63b9"
+commit = "1f6d355f39e39091e632bea92bc1b0cb6b63ab95"