1010//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
1111//! in that case either.
1212
13+ use rustc_target:: abi:: Size ;
14+
1315use crate :: * ;
1416
/// Per-lock state tracked in Miri's `sync` metadata for an `os_unfair_lock`.
///
/// `Active` carries the interpreter-side mutex backing this lock.
/// `Poisoned` marks a lock whose lazy-init cookie was found but whose state
/// could not be mapped back to a mutex — per `os_unfair_lock_get_data`, this
/// means the lock was moved while locked; all further operations on it are
/// treated pessimistically (block forever / fail / abort).
#[derive(Copy, Clone)]
enum MacOsUnfairLock {
    Poisoned,
    Active { id: MutexId },
}
1822
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Returns the `MacOsUnfairLock` state for the lock that `lock_ptr` points to,
    /// lazily creating a fresh mutex on first use via `lazy_sync_get_data`.
    ///
    /// Returns `Poisoned` when the lock's in-memory state indicates it was moved
    /// while locked (see the comment on the first closure below).
    fn os_unfair_lock_get_data(
        &mut self,
        lock_ptr: &OpTy<'tcx>,
    ) -> InterpResult<'tcx, MacOsUnfairLock> {
        let this = self.eval_context_mut();
        let lock = this.deref_pointer(lock_ptr)?;
        this.lazy_sync_get_data(
            &lock,
            Size::ZERO, // offset for init tracking
            || {
                // If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
                // this means the lock was moved while locked. This can happen with a `std` lock,
                // but then any future attempt to unlock will just deadlock. In practice, terrible
                // things can probably happen if you swap two locked locks, since they'd wake up
                // from the wrong queue... we just won't catch all UB of this library API then (we
                // would need to store some unique identifier in-memory for this, instead of a static
                // LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
                interp_ok(MacOsUnfairLock::Poisoned)
            },
            |ecx| {
                // First use of this lock location: back it with a fresh interpreter mutex.
                let id = ecx.machine.sync.mutex_create();
                interp_ok(MacOsUnfairLock::Active { id })
            },
        )
    }
}
3351
@@ -36,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
3654 fn os_unfair_lock_lock ( & mut self , lock_op : & OpTy < ' tcx > ) -> InterpResult < ' tcx > {
3755 let this = self . eval_context_mut ( ) ;
3856
39- let id = this. os_unfair_lock_getid ( lock_op) ?;
57+ let MacOsUnfairLock :: Active { id } = this. os_unfair_lock_get_data ( lock_op) ? else {
58+ // Trying to get a poisoned lock. Just block forever...
59+ this. block_thread (
60+ BlockReason :: Sleep ,
61+ None ,
62+ callback ! (
63+ @capture<' tcx> { }
64+ @unblock = |_this| {
65+ panic!( "we shouldn't wake up ever" )
66+ }
67+ ) ,
68+ ) ;
69+ return interp_ok ( ( ) ) ;
70+ } ;
71+
4072 if this. mutex_is_locked ( id) {
4173 if this. mutex_get_owner ( id) == this. active_thread ( ) {
4274 // Matching the current macOS implementation: abort on reentrant locking.
@@ -60,7 +92,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
6092 ) -> InterpResult < ' tcx > {
6193 let this = self . eval_context_mut ( ) ;
6294
63- let id = this. os_unfair_lock_getid ( lock_op) ?;
95+ let MacOsUnfairLock :: Active { id } = this. os_unfair_lock_get_data ( lock_op) ? else {
96+ // Trying to get a poisoned lock. That never works.
97+ this. write_scalar ( Scalar :: from_bool ( false ) , dest) ?;
98+ return interp_ok ( ( ) ) ;
99+ } ;
100+
64101 if this. mutex_is_locked ( id) {
65102 // Contrary to the blocking lock function, this does not check for
66103 // reentrancy.
@@ -76,40 +113,71 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
    /// Shim for `os_unfair_lock_unlock`: releases the lock, aborting (matching
    /// macOS behavior) if the current thread does not own it. When the lock ends
    /// up fully unlocked, its in-memory word is reset to zero so the location can
    /// be moved and re-initialized later.
    fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };

        // Now, unlock.
        if this.mutex_unlock(id)?.is_none() {
            // Matching the current macOS implementation: abort.
            throw_machine_stop!(TerminationInfo::Abort(
                "attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if !this.mutex_is_locked(id) {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
89140
    /// Shim for `os_unfair_lock_assert_owner`: aborts unless the lock is held by
    /// the current thread. A poisoned (moved-while-locked) lock also aborts,
    /// since we cannot prove ownership.
    fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        };
        if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
            ));
        }

        // The lock is definitely not quiet since we are the owner.

        interp_ok(())
    }
102160
    /// Shim for `os_unfair_lock_assert_not_owner`: aborts if the lock IS held by
    /// the current thread. A poisoned lock trivially passes (we pretend someone
    /// else owns it). Also resets a fully-unlocked lock word to zero, mirroring
    /// `os_unfair_lock_unlock`.
    fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
            // The lock is poisoned, who knows who owns it... we'll pretend: someone else.
            return interp_ok(());
        };
        if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
            throw_machine_stop!(TerminationInfo::Abort(
                "called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
            ));
        }

        // If the lock is not locked by anyone now, it went quiet.
        // Reset to zero so that it can be moved and initialized again for the next phase.
        if !this.mutex_is_locked(id) {
            let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
            this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
        }

        interp_ok(())
    }
115183}
0 commit comments