diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index 6d85183faf75d..27ecefe043b1e 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -54,16 +54,33 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 /// exception. If you need to mutate through an `Arc`, use [`Mutex`][mutex],
 /// [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types.
 ///
-/// `Arc` uses atomic operations for reference counting, so `Arc`s can be
-/// sent between threads. In other words, `Arc` implements [`Send`]
-/// as long as `T` implements [`Send`] and [`Sync`][sync]. The disadvantage is
-/// that atomic operations are more expensive than ordinary memory accesses.
-/// If you are not sharing reference-counted values between threads, consider
-/// using [`rc::Rc`][`Rc`] for lower overhead. [`Rc`] is a safe default, because
-/// the compiler will catch any attempt to send an [`Rc`] between threads.
-/// However, a library might choose `Arc` in order to give library consumers
+/// ## Thread Safety
+///
+/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
+/// counting. This means that it is thread-safe. The disadvantage is that
+/// atomic operations are more expensive than ordinary memory accesses. If you
+/// are not sharing reference-counted values between threads, consider using
+/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
+/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
+/// However, a library might choose `Arc<T>` in order to give library consumers
 /// more flexibility.
 ///
+/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
+/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
+/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
+/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
+/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
+/// data, but it doesn't add thread safety to its data. Consider
+/// `Arc<RefCell<T>>`. `RefCell<T>` isn't [`Sync`], and if `Arc<T>` was always
+/// [`Send`], `Arc<RefCell<T>>` would be as well. But then we'd have a problem:
+/// `RefCell<T>` is not thread safe; it keeps track of the borrowing count using
+/// non-atomic operations.
+///
+/// In the end, this means that you may need to pair `Arc<T>` with some sort of
+/// `std::sync` type, usually `Mutex<T>`.
+///
+/// ## Breaking cycles with `Weak`
+///
 /// The [`downgrade`][downgrade] method can be used to create a non-owning
 /// [`Weak`][weak] pointer. A [`Weak`][weak] pointer can be [`upgrade`][upgrade]d
 /// to an `Arc`, but this will return [`None`] if the value has already been
@@ -74,6 +91,8 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 /// strong `Arc` pointers from parent nodes to children, and [`Weak`][weak]
 /// pointers from children back to their parents.
 ///
+/// ## `Deref` behavior
+///
 /// `Arc` automatically dereferences to `T` (via the [`Deref`][deref] trait),
 /// so you can call `T`'s methods on a value of type `Arc`. To avoid name
 /// clashes with `T`'s methods, the methods of `Arc` itself are [associated
@@ -91,13 +110,13 @@ const MAX_REFCOUNT: usize = (isize::MAX) as usize;
 ///
 /// [arc]: struct.Arc.html
 /// [weak]: struct.Weak.html
-/// [`Rc`]: ../../std/rc/struct.Rc.html
+/// [`Rc<T>`]: ../../std/rc/struct.Rc.html
 /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone
 /// [mutex]: ../../std/sync/struct.Mutex.html
 /// [rwlock]: ../../std/sync/struct.RwLock.html
 /// [atomic]: ../../std/sync/atomic/index.html
 /// [`Send`]: ../../std/marker/trait.Send.html
-/// [sync]: ../../std/marker/trait.Sync.html
+/// [`Sync`]: ../../std/marker/trait.Sync.html
 /// [deref]: ../../std/ops/trait.Deref.html
 /// [downgrade]: struct.Arc.html#method.downgrade
 /// [upgrade]: struct.Weak.html#method.upgrade
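The new "Thread Safety" section above ends by noting that `Arc<T>` usually gets paired with a `std::sync` type such as `Mutex<T>`. A minimal sketch of that pairing (illustrative only, not part of the patch; the shared counter is an arbitrary example):

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // The counter itself is not thread-safe, so it lives inside a Mutex;
    // the Arc gives every thread shared ownership of that Mutex.
    let counter = Arc::new(Mutex::new(0));
    let mut handles = Vec::new();

    for _ in 0..4 {
        let counter = Arc::clone(&counter);
        handles.push(thread::spawn(move || {
            *counter.lock().unwrap() += 1;
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }

    assert_eq!(*counter.lock().unwrap(), 4);
}
```

`Arc<Mutex<i32>>` is both `Send` and `Sync`, whereas the `Arc<RefCell<T>>` discussed above would be rejected by the compiler at the `thread::spawn` call.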
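The "Breaking cycles with `Weak`" paragraph says an upgrade returns [`None`] once the value has been dropped. A small sketch of that behavior (again illustrative, not from the patch):

```rust
use std::sync::Arc;

fn main() {
    let strong = Arc::new(5);
    let weak = Arc::downgrade(&strong);

    // While a strong Arc still exists, the Weak pointer can be upgraded.
    assert!(weak.upgrade().is_some());

    // After the last strong reference is dropped, upgrade returns None.
    drop(strong);
    assert!(weak.upgrade().is_none());
}
```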
diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs
index a60abefc07650..5f189d473be79 100644
--- a/src/libcore/ptr.rs
+++ b/src/libcore/ptr.rs
@@ -1005,7 +1005,7 @@ unsafe impl<T: Sync + ?Sized> Sync for Unique<T> { }
 
 #[unstable(feature = "unique", issue = "27730")]
 impl<T: Sized> Unique<T> {
-    /// Creates a new `Shared` that is dangling, but well-aligned.
+    /// Creates a new `Unique` that is dangling, but well-aligned.
     ///
     /// This is useful for initializing types which lazily allocate, like
     /// `Vec::new` does.
diff --git a/src/libstd/path.rs b/src/libstd/path.rs
index 9d66430bc9303..f4b9a8972e3ab 100644
--- a/src/libstd/path.rs
+++ b/src/libstd/path.rs
@@ -51,10 +51,17 @@
 //! ```
 //! use std::path::PathBuf;
 //!
+//! // This way works...
 //! let mut path = PathBuf::from("c:\\");
+//!
 //! path.push("windows");
 //! path.push("system32");
+//!
 //! path.set_extension("dll");
+//!
+//! // ... but push is best used if you don't know everything up
+//! // front. If you do, this way is better:
+//! let path: PathBuf = ["c:\\", "windows", "system32.dll"].iter().collect();
 //! ```
 //!
 //! [`Component`]: ../../std/path/enum.Component.html
@@ -63,6 +70,7 @@
 //! [`Path`]: ../../std/path/struct.Path.html
 //! [`push`]: ../../std/path/struct.PathBuf.html#method.push
 //! [`String`]: ../../std/string/struct.String.html
+//!
 //! [`str`]: ../../std/primitive.str.html
 //! [`OsString`]: ../../std/ffi/struct.OsString.html
 //! [`OsStr`]: ../../std/ffi/struct.OsStr.html
@@ -1036,14 +1044,40 @@ impl<'a> cmp::Ord for Components<'a> {
 ///
 /// # Examples
 ///
+/// You can use [`push`] to build up a `PathBuf` from
+/// components:
+///
 /// ```
 /// use std::path::PathBuf;
 ///
-/// let mut path = PathBuf::from("c:\\");
+/// let mut path = PathBuf::new();
+///
+/// path.push(r"C:\");
 /// path.push("windows");
 /// path.push("system32");
+///
 /// path.set_extension("dll");
 /// ```
+///
+/// However, [`push`] is best used for dynamic situations. This is a better way
+/// to do this when you know all of the components ahead of time:
+///
+/// ```
+/// use std::path::PathBuf;
+///
+/// let path: PathBuf = [r"C:\", "windows", "system32.dll"].iter().collect();
+/// ```
+///
+/// We can still do better than this! Since these are all strings, we can use
+/// `From::from`:
+///
+/// ```
+/// use std::path::PathBuf;
+///
+/// let path = PathBuf::from(r"C:\windows\system32.dll");
+/// ```
+///
+/// Which method works best depends on what kind of situation you're in.
 #[derive(Clone)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct PathBuf {
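The new `PathBuf` examples above show three ways of building the same path. As a quick sanity check that they really are equivalent (illustrative only, not part of the patch; the Unix-style library path is just a portable placeholder):

```rust
use std::path::PathBuf;

fn main() {
    // Built up incrementally with push.
    let mut pushed = PathBuf::from("/usr");
    pushed.push("lib");
    pushed.push("libfoo");
    pushed.set_extension("so");

    // Collected from components known up front.
    let collected: PathBuf = ["/usr", "lib", "libfoo.so"].iter().collect();

    // Converted from a single string.
    let converted = PathBuf::from("/usr/lib/libfoo.so");

    assert_eq!(pushed, collected);
    assert_eq!(pushed, converted);
}
```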
diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs
index 4432f898e04f8..230c60baf8bb4 100644
--- a/src/libstd/thread/mod.rs
+++ b/src/libstd/thread/mod.rs
@@ -96,41 +96,6 @@
 //! The [`thread::current`] function is available even for threads not spawned
 //! by the APIs of this module.
 //!
-//! ## Blocking support: park and unpark
-//!
-//! Every thread is equipped with some basic low-level blocking support, via the
-//! [`thread::park`][`park`] function and [`thread::Thread::unpark()`][`unpark`]
-//! method. [`park`] blocks the current thread, which can then be resumed from
-//! another thread by calling the [`unpark`] method on the blocked thread's handle.
-//!
-//! Conceptually, each [`Thread`] handle has an associated token, which is
-//! initially not present:
-//!
-//! * The [`thread::park`][`park`] function blocks the current thread unless or until
-//!   the token is available for its thread handle, at which point it atomically
-//!   consumes the token. It may also return *spuriously*, without consuming the
-//!   token. [`thread::park_timeout`] does the same, but allows specifying a
-//!   maximum time to block the thread for.
-//!
-//! * The [`unpark`] method on a [`Thread`] atomically makes the token available
-//!   if it wasn't already.
-//!
-//! In other words, each [`Thread`] acts a bit like a semaphore with initial count
-//! 0, except that the semaphore is *saturating* (the count cannot go above 1),
-//! and can return spuriously.
-//!
-//! The API is typically used by acquiring a handle to the current thread,
-//! placing that handle in a shared data structure so that other threads can
-//! find it, and then `park`ing. When some desired condition is met, another
-//! thread calls [`unpark`] on the handle.
-//!
-//! The motivation for this design is twofold:
-//!
-//! * It avoids the need to allocate mutexes and condvars when building new
-//!   synchronization primitives; the threads already provide basic blocking/signaling.
-//!
-//! * It can be implemented very efficiently on many platforms.
-//!
 //! ## Thread-local storage
 //!
 //! This module also provides an implementation of thread-local storage for Rust
@@ -322,6 +287,8 @@ impl Builder {
     /// thread finishes). The join handle can be used to block on
     /// termination of the child thread, including recovering its panics.
     ///
+    /// For more complete documentation, see [`thread::spawn`][`spawn`].
+    ///
     /// # Errors
     ///
     /// Unlike the [`spawn`] free function, this method yields an
@@ -396,19 +363,19 @@ impl Builder {
 /// panics, [`join`] will return an [`Err`] containing the argument given to
 /// [`panic`].
 ///
+/// This will create a thread using default parameters of [`Builder`]; if you
+/// want to specify the stack size or the name of the thread, use [`Builder`]
+/// instead.
+///
 /// # Panics
 ///
 /// Panics if the OS fails to create a thread; use [`Builder::spawn`]
 /// to recover from such errors.
 ///
-/// [`JoinHandle`]: ../../std/thread/struct.JoinHandle.html
-/// [`join`]: ../../std/thread/struct.JoinHandle.html#method.join
-/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
-/// [`panic`]: ../../std/macro.panic.html
-/// [`Builder::spawn`]: ../../std/thread/struct.Builder.html#method.spawn
-///
 /// # Examples
 ///
+/// Creating a thread.
+///
 /// ```
 /// use std::thread;
 ///
@@ -418,6 +385,54 @@ impl Builder {
 ///
 /// handler.join().unwrap();
 /// ```
+///
+/// As mentioned in the module documentation, threads are usually made to
+/// communicate using [`channels`]; here is how it usually looks.
+///
+/// This example also shows how to use `move` in order to give ownership
+/// of values to a thread.
+///
+/// ```
+/// use std::thread;
+/// use std::sync::mpsc::channel;
+///
+/// let (tx, rx) = channel();
+///
+/// let sender = thread::spawn(move || {
+///     let _ = tx.send("Hello, thread".to_owned());
+/// });
+///
+/// let receiver = thread::spawn(move || {
+///     println!("{}", rx.recv().unwrap());
+/// });
+///
+/// let _ = sender.join();
+/// let _ = receiver.join();
+/// ```
+///
+/// A thread can also return a value through its [`JoinHandle`]; you can use
+/// this to make asynchronous computations (futures might be more appropriate,
+/// though).
+///
+/// ```
+/// use std::thread;
+///
+/// let computation = thread::spawn(|| {
+///     // Some expensive computation.
+///     42
+/// });
+///
+/// let result = computation.join().unwrap();
+/// println!("{}", result);
+/// ```
+///
+/// [`channels`]: ../../std/sync/mpsc/index.html
+/// [`JoinHandle`]: ../../std/thread/struct.JoinHandle.html
+/// [`join`]: ../../std/thread/struct.JoinHandle.html#method.join
+/// [`Err`]: ../../std/result/enum.Result.html#variant.Err
+/// [`panic`]: ../../std/macro.panic.html
+/// [`Builder::spawn`]: ../../std/thread/struct.Builder.html#method.spawn
+/// [`Builder`]: ../../std/thread/struct.Builder.html
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn spawn<F, T>(f: F) -> JoinHandle<T>
     where F: FnOnce() -> T, F: Send + 'static, T: Send + 'static
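The sentence added to the `spawn` docs above points readers at [`Builder`] when they need to set a stack size or a thread name. A hedged sketch of what that looks like (the name and stack size here are arbitrary, not from the patch):

```rust
use std::thread;

fn main() {
    // Configure the thread before spawning it.
    let builder = thread::Builder::new()
        .name("worker".to_string())
        .stack_size(32 * 1024);

    // Unlike thread::spawn, Builder::spawn returns an io::Result, so failure
    // to create the OS thread can be handled instead of causing a panic.
    let handle = builder
        .spawn(|| {
            println!("running in thread {:?}", thread::current().name());
        })
        .unwrap();

    handle.join().unwrap();
}
```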
@@ -568,23 +583,72 @@ pub fn sleep(dur: Duration) {
 /// Blocks unless or until the current thread's token is made available.
 ///
-/// Every thread is equipped with some basic low-level blocking support, via
-/// the `park()` function and the [`unpark`][unpark] method. These can be
-/// used as a more CPU-efficient implementation of a spinlock.
+/// A call to `park` does not guarantee that the thread will remain parked
+/// forever, and callers should be prepared for this possibility.
 ///
-/// [unpark]: struct.Thread.html#method.unpark
+/// # park and unpark
+///
+/// Every thread is equipped with some basic low-level blocking support, via the
+/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
+/// method. [`park`] blocks the current thread, which can then be resumed from
+/// another thread by calling the [`unpark`] method on the blocked thread's
+/// handle.
+///
+/// Conceptually, each [`Thread`] handle has an associated token, which is
+/// initially not present:
+///
+/// * The [`thread::park`][`park`] function blocks the current thread unless or
+///   until the token is available for its thread handle, at which point it
+///   atomically consumes the token. It may also return *spuriously*, without
+///   consuming the token. [`thread::park_timeout`] does the same, but allows
+///   specifying a maximum time to block the thread for.
+///
+/// * The [`unpark`] method on a [`Thread`] atomically makes the token available
+///   if it wasn't already.
+///
+/// In other words, each [`Thread`] acts a bit like a spinlock that can be
+/// locked and unlocked using `park` and `unpark`.
 ///
 /// The API is typically used by acquiring a handle to the current thread,
 /// placing that handle in a shared data structure so that other threads can
-/// find it, and then parking (in a loop with a check for the token actually
-/// being acquired).
+/// find it, and then `park`ing. When some desired condition is met, another
+/// thread calls [`unpark`] on the handle.
 ///
-/// A call to `park` does not guarantee that the thread will remain parked
-/// forever, and callers should be prepared for this possibility.
+/// The motivation for this design is twofold:
 ///
-/// See the [module documentation][thread] for more detail.
+/// * It avoids the need to allocate mutexes and condvars when building new
+///   synchronization primitives; the threads already provide basic
+///   blocking/signaling.
 ///
-/// [thread]: index.html
+/// * It can be implemented very efficiently on many platforms.
+///
+/// # Examples
+///
+/// ```
+/// use std::thread;
+/// use std::time::Duration;
+///
+/// let parked_thread = thread::Builder::new()
+///     .spawn(|| {
+///         println!("Parking thread");
+///         thread::park();
+///         println!("Thread unparked");
+///     })
+///     .unwrap();
+///
+/// // Let some time pass for the thread to be spawned.
+/// thread::sleep(Duration::from_millis(10));
+///
+/// println!("Unpark the thread");
+/// parked_thread.thread().unpark();
+///
+/// parked_thread.join().unwrap();
+/// ```
+///
+/// [`Thread`]: ../../std/thread/struct.Thread.html
+/// [`park`]: ../../std/thread/fn.park.html
+/// [`unpark`]: ../../std/thread/struct.Thread.html#method.unpark
+/// [`thread::park_timeout`]: ../../std/thread/fn.park_timeout.html
 //
 // The implementation currently uses the trivial strategy of a Mutex+Condvar
 // with wakeup flag, which does not actually allow spurious wakeups. In the
@@ -601,21 +665,21 @@ pub fn park() {
     *guard = false;
 }
 
-/// Use [park_timeout].
+/// Use [`park_timeout`].
 ///
 /// Blocks unless or until the current thread's token is made available or
 /// the specified duration has been reached (may wake spuriously).
 ///
-/// The semantics of this function are equivalent to `park()` except that the
-/// thread will be blocked for roughly no longer than `ms`. This method
-/// should not be used for precise timing due to anomalies such as
+/// The semantics of this function are equivalent to [`park`] except
+/// that the thread will be blocked for roughly no longer than `ms`. This
+/// method should not be used for precise timing due to anomalies such as
 /// preemption or platform differences that may not cause the maximum
 /// amount of time waited to be precisely `ms` long.
 ///
-/// See the [module documentation][thread] for more detail.
+/// See the [park documentation][`park`] for more detail.
 ///
-/// [thread]: index.html
-/// [park_timeout]: fn.park_timeout.html
+/// [`park_timeout`]: fn.park_timeout.html
+/// [`park`]: ../../std/thread/fn.park.html
 #[stable(feature = "rust1", since = "1.0.0")]
 #[rustc_deprecated(since = "1.6.0",
                    reason = "replaced by `std::thread::park_timeout`")]
 pub fn park_timeout_ms(ms: u32) {
@@ -625,13 +689,13 @@ pub fn park_timeout_ms(ms: u32) {
 /// Blocks unless or until the current thread's token is made available or
 /// the specified duration has been reached (may wake spuriously).
 ///
-/// The semantics of this function are equivalent to `park()` except that the
-/// thread will be blocked for roughly no longer than `dur`. This method
-/// should not be used for precise timing due to anomalies such as
+/// The semantics of this function are equivalent to [`park`][park] except
+/// that the thread will be blocked for roughly no longer than `dur`. This
+/// method should not be used for precise timing due to anomalies such as
 /// preemption or platform differences that may not cause the maximum
 /// amount of time waited to be precisely `dur` long.
 ///
-/// See the module doc for more detail.
+/// See the [park documentation][park] for more details.
 ///
 /// # Platform behavior
 ///
@@ -656,6 +720,8 @@ pub fn park_timeout_ms(ms: u32) {
 ///     park_timeout(timeout);
 /// }
 /// ```
+///
+/// [park]: fn.park.html
 #[stable(feature = "park_timeout", since = "1.4.0")]
 pub fn park_timeout(dur: Duration) {
     let thread = current();
@@ -777,22 +843,36 @@ impl Thread {
     /// Atomically makes the handle's token available if it is not already.
     ///
-    /// See the module doc for more detail.
+    /// Every thread is equipped with some basic low-level blocking support, via
+    /// the [`park`][park] function and the `unpark()` method. These can be
+    /// used as a more CPU-efficient implementation of a spinlock.
+    ///
+    /// See the [park documentation][park] for more details.
     ///
    /// # Examples
     ///
     /// ```
     /// use std::thread;
+    /// use std::time::Duration;
     ///
-    /// let handler = thread::Builder::new()
+    /// let parked_thread = thread::Builder::new()
     ///     .spawn(|| {
-    ///         let thread = thread::current();
-    ///         thread.unpark();
+    ///         println!("Parking thread");
+    ///         thread::park();
+    ///         println!("Thread unparked");
     ///     })
     ///     .unwrap();
     ///
-    /// handler.join().unwrap();
+    /// // Let some time pass for the thread to be spawned.
+    /// thread::sleep(Duration::from_millis(10));
+    ///
+    /// println!("Unpark the thread");
+    /// parked_thread.thread().unpark();
+    ///
+    /// parked_thread.join().unwrap();
     /// ```
+    ///
+    /// [park]: fn.park.html
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn unpark(&self) {
         let mut guard = self.inner.lock.lock().unwrap();
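Both the new `park` docs and the removed module-level section stress that `park` may return spuriously, so callers should re-check their condition. A minimal sketch of that pattern (not part of the patch), using an `AtomicBool` as the condition:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

fn main() {
    let flag = Arc::new(AtomicBool::new(false));
    let flag_for_thread = Arc::clone(&flag);

    let handle = thread::spawn(move || {
        // park may return spuriously, so loop until the flag is actually set.
        while !flag_for_thread.load(Ordering::Acquire) {
            thread::park();
        }
        println!("flag received");
    });

    // Set the condition first, then unpark the waiting thread.
    flag.store(true, Ordering::Release);
    handle.thread().unpark();

    handle.join().unwrap();
}
```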