@@ -20,9 +20,6 @@
 #define for_each_evl_mutex_waiter(__pos, __mutex) \
 	list_for_each_entry(__pos, &(__mutex)->wchan.wait_list, wait_next)

-#define for_each_evl_booster(__pos, __thread) \
-	list_for_each_entry(__pos, &(__thread)->boosters, next_booster)
-
 static inline int get_ceiling_value(struct evl_mutex *mutex)
 {
 	/*
@@ -409,67 +406,29 @@ static void drop_booster(struct evl_mutex *mutex)
 }

 /*
- * Detect when current which is running out-of-band is about to sleep
- * on a mutex currently owned by another thread running in-band.
- *
- * mutex->wchan.lock held, irqs off, curr == this_evl_rq()->curr.
- */
-static void detect_inband_owner(struct evl_mutex *mutex,
-				struct evl_thread *curr)
-{
-	struct evl_thread *owner = mutex->wchan.owner;
-
-	/*
-	 * @curr == this_evl_rq()->curr so no need to grab
-	 * @curr->lock.
-	 */
-	raw_spin_lock(&curr->rq->lock);
-
-	if (curr->info & T_PIALERT) {
-		curr->info &= ~T_PIALERT;
-	} else if (owner->state & T_INBAND) {
-		curr->info |= T_PIALERT;
-		raw_spin_unlock(&curr->rq->lock);
-		evl_notify_thread(curr, EVL_HMDIAG_LKDEPEND, evl_nil);
-		return;
-	}
-
-	raw_spin_unlock(&curr->rq->lock);
-}
-
-/*
- * Detect when current is about to switch in-band while holding a
- * mutex which is causing an active PI or PP boost. Since such a
- * dependency on in-band would cause a priority inversion for the
- * waiter(s), the latter is sent a HM notification if T_WOLI is set.
+ * Detect when current is about to switch in-band while owning a
+ * mutex, which is plain wrong since this would create a priority
+ * inversion. T_WOLI is set for current.
  */
-void evl_detect_boost_drop(void)
+void evl_check_no_mutex(void)
 {
 	struct evl_thread *curr = evl_current();
-	struct evl_thread *waiter;
-	struct evl_mutex *mutex;
 	unsigned long flags;
+	bool notify = false;

 	raw_spin_lock_irqsave(&curr->lock, flags);
-
-	/*
-	 * Iterate over waiters of each mutex we got boosted for due
-	 * to PI/PP.
-	 */
-	for_each_evl_booster(mutex, curr) {
-		raw_spin_lock(&mutex->wchan.lock);
-		for_each_evl_mutex_waiter(waiter, mutex) {
-			if (!(waiter->state & (T_WOLI|T_PIALERT)))
-				continue;
-			raw_spin_lock(&waiter->rq->lock);
-			waiter->info |= T_PIALERT;
-			raw_spin_unlock(&waiter->rq->lock);
-			evl_notify_thread(waiter, EVL_HMDIAG_LKDEPEND, evl_nil);
+	if (!(curr->info & T_PIALERT)) {
+		notify = !list_empty(&curr->owned_mutexes);
+		if (notify) {
+			raw_spin_lock(&curr->rq->lock);
+			curr->info |= T_PIALERT;
+			raw_spin_unlock(&curr->rq->lock);
 		}
-		raw_spin_unlock(&mutex->wchan.lock);
 	}
-
 	raw_spin_unlock_irqrestore(&curr->lock, flags);
+
+	if (notify)
+		evl_notify_thread(curr, EVL_HMDIAG_LKDEPEND, evl_nil);
 }

 void __evl_init_mutex(struct evl_mutex *mutex,
@@ -707,9 +666,6 @@ int evl_lock_mutex_timeout(struct evl_mutex *mutex, ktime_t timeout,
 		evl_put_element(&owner->element);
 	}

-	if (unlikely(curr->state & T_WOLI))
-		detect_inband_owner(mutex, curr);
-
 	evl_double_thread_lock(curr, owner);

 	walk_mode = evl_pi_check;
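
As a reading aid, here is a minimal sketch of how the renamed helper might be wired into the in-band transition path; the hook name and call site below are illustrative assumptions, not code taken from this commit:

/*
 * Hypothetical caller, for illustration only: run the ownership check
 * when a T_WOLI-monitored thread is about to switch in-band. The hook
 * name and placement are assumed, not part of this change.
 */
static void sketch_on_inband_switch(void)
{
	struct evl_thread *curr = evl_current();

	if (curr->state & T_WOLI)	/* warn-on-lock-inconsistency enabled */
		evl_check_no_mutex();	/* raises EVL_HMDIAG_LKDEPEND if mutexes are still owned */
}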