@@ -62,7 +62,7 @@
 // `Once.state_and_queue` and an unknown number of `Waiter.signaled`.
 // * `state_and_queue` is used (1) as a state flag, (2) for synchronizing the
 //   result of the `Once`, and (3) for synchronizing `Waiter` nodes.
-//     - At the end of the `call_inner` function we have to make sure the result
+//     - At the end of the `try_call_inner` function we have to make sure the result
 //       of the `Once` is acquired. So every load which can be the only one to
 //       load COMPLETED must have at least Acquire ordering, which means all
 //       three of them.
@@ -75,7 +75,7 @@
 //       `state_and_queue` with Acquire ordering.
 //     - There is just one store where `state_and_queue` is used only as a
 //       state flag, without having to synchronize data: switching the state
-//       from INCOMPLETE to RUNNING in `call_inner`. This store can be Relaxed,
+//       from INCOMPLETE to RUNNING in `try_call_inner`. This store can be Relaxed,
 //       but the read has to be Acquire because of the requirements mentioned
 //       above.
 // * `Waiter.signaled` is both used as a flag, and to protect a field with
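The reasoning above is the classic release/acquire message-passing pattern. As a minimal standalone sketch (not part of this patch, with illustrative names): a Release store publishes the result, and any Acquire load that sees the flag is guaranteed to also see the result.

```rust
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;

static DATA: AtomicUsize = AtomicUsize::new(0);
static COMPLETE: AtomicBool = AtomicBool::new(false);

fn main() {
    let writer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed); // the "result" being published
        COMPLETE.store(true, Ordering::Release); // publish it
    });
    // An Acquire load that observes `true` also observes the 42 written
    // before the Release store; this is the property the comments above
    // demand of every load that may be the one to see COMPLETED.
    while !COMPLETE.load(Ordering::Acquire) {
        std::hint::spin_loop();
    }
    assert_eq!(DATA.load(Ordering::Relaxed), 42);
    writer.join().unwrap();
}
```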
@@ -93,7 +93,7 @@ use crate::fmt;
 use crate::marker;
 use crate::ptr;
 use crate::sync::atomic::{spin_loop_hint, AtomicBool, AtomicUsize, Ordering};
-use crate::thread::{self, Thread};
+use crate::thread::{self, Thread, ThreadId};
 
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Useful for one-time initialization for FFI or related
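For context, the user-facing behaviour of the primitive is unchanged by this patch; typical one-time initialization looks like this (ordinary `std::sync::Once` usage, shown only for orientation):

```rust
use std::sync::Once;

static INIT: Once = Once::new();

fn main() {
    // The closure runs at most once, even if call_once is reached from
    // many threads; all other callers block until it completes.
    INIT.call_once(|| {
        println!("expensive one-time setup");
    });
    INIT.call_once(|| {
        println!("never printed: initialization already ran");
    });
}
```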
@@ -190,6 +190,7 @@ struct Waiter {
 #[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
 struct WaiterQueue {
     head: Cell<*const Waiter>,
+    id: ThreadId,
 }
 
 // A guard that will wake up the waiters when it gets dropped, i.e. also on panic.
@@ -202,6 +203,13 @@ struct WaiterQueueGuard<'a> {
     set_state_on_drop_to: usize,
 }
 
+// Potential outcomes of calling try_call_inner.
+enum CallResult {
+    Complete,
+    Poisoned,
+    Reentrance,
+}
+
 impl Once {
     /// Creates a new `Once` value.
     #[stable(feature = "once_new", since = "1.2.0")]
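The `WaiterQueueGuard` comment relies on the drop-guard idiom: the wake-up logic lives in `Drop`, so it also runs while unwinding from a panicking initializer. A generic sketch of the idiom, with hypothetical names rather than the patch's types:

```rust
struct WakeOnDrop<'a> {
    woken: &'a mut bool,
}

impl Drop for WakeOnDrop<'_> {
    fn drop(&mut self) {
        // Runs on normal exit *and* during unwinding after a panic,
        // which is what lets a panicking initializer still wake waiters.
        *self.woken = true;
    }
}

fn main() {
    let mut woken = false;
    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        let _guard = WakeOnDrop { woken: &mut woken };
        panic!("init failed");
    }));
    assert!(result.is_err());
    assert!(woken); // the guard still ran its cleanup
}
```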
@@ -403,13 +411,27 @@ impl Once {
     // without some allocation overhead.
     #[cold]
     fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
+        match self.try_call_inner(ignore_poisoning, init) {
+            CallResult::Complete => (),
+            // Panic to propagate the poison.
+            CallResult::Poisoned => panic!("Once instance has previously been poisoned"),
+            CallResult::Reentrance => panic!("Once instance cannot be recursively initialized"),
+        }
+    }
+
+    fn try_call_inner(
+        &self,
+        ignore_poisoning: bool,
+        init: &mut dyn FnMut(&OnceState),
+    ) -> CallResult {
         let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
         loop {
             match state_and_queue {
-                COMPLETE => break,
+                COMPLETE => {
+                    return CallResult::Complete;
+                }
                 POISONED if !ignore_poisoning => {
-                    // Panic to propagate the poison.
-                    panic!("Once instance has previously been poisoned");
+                    return CallResult::Poisoned;
                 }
                 POISONED | INCOMPLETE => {
                     // Try to register this thread as the one RUNNING.
@@ -426,7 +448,8 @@
 
                     // `waiter_queue` will manage other waiting threads, and `queue_guard`
                     // will wake them up on drop.
-                    let waiter_queue = WaiterQueue { head: Cell::new(ptr::null()) };
+                    let waiter_queue =
+                        WaiterQueue { head: Cell::new(ptr::null()), id: thread::current().id() };
                     let mut queue_guard = WaiterQueueGuard {
                         state_and_queue: &self.state_and_queue,
                         queue: &waiter_queue,
@@ -445,27 +468,32 @@
                     };
                     init(&init_state);
                     queue_guard.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
-                    break;
+                    return CallResult::Complete;
                 }
                 _ => {
                     // All other values must be RUNNING with possibly a
                     // pointer to the waiter queue in the more significant bits.
                     assert!(state_and_queue & STATE_MASK == RUNNING);
-                    wait(&self.state_and_queue, state_and_queue);
+                    if wait(&self.state_and_queue, state_and_queue) {
+                        return CallResult::Reentrance;
+                    }
                     state_and_queue = self.state_and_queue.load(Ordering::Acquire);
                 }
             }
         }
     }
 }
 
-fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
+// Returns whether reentrance has been detected.
+fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) -> bool {
     // Note: the following code was carefully written to avoid creating a
     // mutable reference to `node` that gets aliased.
 
     // Create a node upfront to reduce time spent inside spin lock.
+    let thread = thread::current();
+    let id = thread.id();
     let node = Waiter {
-        thread: Cell::new(Some(thread::current())),
+        thread: Cell::new(Some(thread)),
         signaled: AtomicBool::new(false),
         next: Cell::new(ptr::null()),
     };
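Hoisting `thread::current()` out of the `Waiter` construction lets the same handle serve both the new id comparison and the stored wake-up handle, instead of looking the current thread up twice. The same pattern in isolation (illustrative only):

```rust
use std::thread;

fn main() {
    // One lookup of the current thread; reuse the handle for both the
    // id comparison and storage of the handle itself.
    let thread = thread::current();
    let id = thread.id();
    assert_eq!(id, thread::current().id()); // ids are stable per thread
    println!("running on {:?}", id);
}
```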
@@ -475,7 +503,7 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
         // Don't queue this thread if the status is no longer running,
         // otherwise we will not be woken up.
         if current_state & STATE_MASK != RUNNING {
-            return;
+            return false;
         }
 
         // Currently locked, spin.
@@ -496,18 +524,28 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
     }
 
     // Insert our node into the linked list.
-    {
+    let reentry = {
         // SAFETY: This is okay because we have just "lock"ed it. Even the thread
         // that creates this WaiterQueue would need to lock it before dropping it,
         // so the reference is definitely not dangling.
         let queue = unsafe { &*((current_state & !STATE_MASK) as *const WaiterQueue) };
-        node.next.set(queue.head.get());
-        queue.head.set(&node as *const Waiter);
-    }
+        if queue.id != id {
+            node.next.set(queue.head.get());
+            queue.head.set(&node as *const Waiter);
+            false
+        } else {
+            // A matching thread id means this is a reentrant call to try_call_inner.
+            true
+        }
+    };
 
     // Unlock the WaiterQueue.
     state_and_queue.store(current_state, Ordering::Release);
 
+    if reentry {
+        return true;
+    }
+
     // We have enqueued ourselves, now let's wait.
     // It is important not to return before being signaled, otherwise we
     // would drop our `Waiter` node and leave a hole in the linked list
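The detection itself reduces to a `ThreadId` equality check against the owner recorded in `WaiterQueue::id`: a would-be waiter carrying the RUNNING thread's own id cannot be a second thread, so it must be the initializer re-entering. A stripped-down sketch (the helper name is hypothetical):

```rust
use std::thread::{self, ThreadId};

// The initializing thread records its id; a would-be waiter with the
// same id must be a reentrant call rather than a genuine second thread.
fn is_reentrant(queue_owner: ThreadId) -> bool {
    thread::current().id() == queue_owner
}

fn main() {
    let owner = thread::current().id();
    assert!(is_reentrant(owner)); // same thread: reentrance
    let other = thread::spawn(move || is_reentrant(owner)).join().unwrap();
    assert!(!other); // a genuine waiter on another thread
}
```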
@@ -520,6 +558,8 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
         // an `unpark` just before on an unparked thread it does not park.
         thread::park();
     }
+
+    false
 }
 
 #[stable(feature = "std_debug", since = "1.16.0")]
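The loop leans on the documented `park`/`unpark` token guarantee cited in the comment: an `unpark` delivered before `park` makes the next `park` return immediately, so a waiter signaled between its `signaled` check and its call to `park` cannot sleep forever. In isolation:

```rust
use std::thread;
use std::time::Duration;

fn main() {
    let waiter = thread::spawn(|| {
        // Give the main thread time to unpark us *before* we park.
        thread::sleep(Duration::from_millis(100));
        // The token from the early unpark is consumed here, so this
        // returns immediately instead of blocking.
        thread::park();
        println!("woke without missing the wakeup");
    });
    waiter.thread().unpark(); // unpark delivered before park
    waiter.join().unwrap();
}
```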