@@ -506,13 +506,13 @@ static void __print_lock_name(struct lock_class *class)
         name = class->name;
         if (!name) {
                 name = __get_key_name(class->key, str);
-                printk("%s", name);
+                printk(KERN_CONT "%s", name);
         } else {
-                printk("%s", name);
+                printk(KERN_CONT "%s", name);
                 if (class->name_version > 1)
-                        printk("#%d", class->name_version);
+                        printk(KERN_CONT "#%d", class->name_version);
                 if (class->subclass)
-                        printk("/%d", class->subclass);
+                        printk(KERN_CONT "/%d", class->subclass);
         }
 }
 
@@ -522,9 +522,9 @@ static void print_lock_name(struct lock_class *class)
 
         get_usage_chars(class, usage);
 
-        printk(" (");
+        printk(KERN_CONT " (");
         __print_lock_name(class);
-        printk("){%s}", usage);
+        printk(KERN_CONT "){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
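The change is mechanical, but the rule behind it matters: printk() treats a call that does not carry KERN_CONT as the start of a new log record, so multi-call lines such as print_lock_name()'s " (" ... "){...}" sequence would otherwise be split across several lines. A minimal sketch of the continuation idiom outside lockdep; the helper name demo_print_range() is hypothetical and only printk()/KERN_CONT/KERN_INFO are real kernel interfaces:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Hypothetical helper: prints "range: [start..end]" as one logical line. */
static void demo_print_range(unsigned long start, unsigned long end)
{
        printk(KERN_INFO "range: [");           /* starts a new log record */
        printk(KERN_CONT "%lu..", start);       /* continues the same line */
        printk(KERN_CONT "%lu]\n", end);        /* '\n' terminates the record */
}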
@@ -536,7 +536,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
         if (!name)
                 name = __get_key_name(lock->key->subkeys, str);
 
-        printk("%s", name);
+        printk(KERN_CONT "%s", name);
 }
 
 static void print_lock(struct held_lock *hlock)
@@ -551,13 +551,13 @@ static void print_lock(struct held_lock *hlock)
         barrier();
 
         if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
-                printk("<RELEASED>\n");
+                printk(KERN_CONT "<RELEASED>\n");
                 return;
         }
 
         print_lock_name(lock_classes + class_idx - 1);
-        printk(", at: ");
-        print_ip_sym(hlock->acquire_ip);
+        printk(KERN_CONT ", at: [<%p>] %pS\n",
+                (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
 }
 
 static void lockdep_print_held_locks(struct task_struct *curr)
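print_ip_sym() always emits a complete line of its own, so it cannot continue the text that print_lock() has already started. The replacement folds the address into the same printk() using the %p and %pS format specifiers, which print the pointer value and its kallsyms symbol+offset respectively. A hedged sketch of the same technique; report_ip() is a made-up name used only for illustration:

#include <linux/kernel.h>
#include <linux/printk.h>

/*
 * Hypothetical helper mirroring what print_lock() now does for
 * hlock->acquire_ip: append the address raw (%p, possibly hashed on newer
 * kernels) and symbolized (%pS) as a continuation of the current line,
 * instead of letting print_ip_sym() start a fresh line.
 */
static void report_ip(unsigned long ip)
{
        printk(KERN_CONT ", at: [<%p>] %pS\n", (void *)ip, (void *)ip);
}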
@@ -792,8 +792,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
                 printk("\nnew class %p: %s", class->key, class->name);
                 if (class->name_version > 1)
-                        printk("#%d", class->name_version);
-                printk("\n");
+                        printk(KERN_CONT "#%d", class->name_version);
+                printk(KERN_CONT "\n");
                 dump_stack();
 
                 if (!graph_lock()) {
@@ -1071,7 +1071,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
                 return 0;
         printk("\n-> #%u", depth);
         print_lock_name(target->class);
-        printk(":\n");
+        printk(KERN_CONT ":\n");
         print_stack_trace(&target->trace, 6);
 
         return 0;
@@ -1102,28 +1102,28 @@ print_circular_lock_scenario(struct held_lock *src,
         if (parent != source) {
                 printk("Chain exists of:\n  ");
                 __print_lock_name(source);
-                printk(" --> ");
+                printk(KERN_CONT " --> ");
                 __print_lock_name(parent);
-                printk(" --> ");
+                printk(KERN_CONT " --> ");
                 __print_lock_name(target);
-                printk("\n\n");
+                printk(KERN_CONT "\n\n");
         }
 
         printk(" Possible unsafe locking scenario:\n\n");
         printk("       CPU0                    CPU1\n");
         printk("       ----                    ----\n");
         printk("  lock(");
         __print_lock_name(target);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("                               lock(");
         __print_lock_name(parent);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("                               lock(");
         __print_lock_name(target);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("  lock(");
         __print_lock_name(source);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -1359,22 +1359,22 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
         printk("%*s->", depth, "");
         print_lock_name(class);
-        printk(" ops: %lu", class->ops);
-        printk(" {\n");
+        printk(KERN_CONT " ops: %lu", class->ops);
+        printk(KERN_CONT " {\n");
 
         for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
                 if (class->usage_mask & (1 << bit)) {
                         int len = depth;
 
                         len += printk("%*s   %s", depth, "", usage_str[bit]);
-                        len += printk(" at:\n");
+                        len += printk(KERN_CONT " at:\n");
                         print_stack_trace(class->usage_traces + bit, len);
                 }
         }
         printk("%*s }\n", depth, "");
 
-        printk("%*s ... key      at: ", depth, "");
-        print_ip_sym((unsigned long)class->key);
+        printk("%*s ... key      at: [<%p>] %pS\n",
+                depth, "", class->key, class->key);
 }
 
 /*
@@ -1437,30 +1437,30 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
         if (middle_class != unsafe_class) {
                 printk("Chain exists of:\n  ");
                 __print_lock_name(safe_class);
-                printk(" --> ");
+                printk(KERN_CONT " --> ");
                 __print_lock_name(middle_class);
-                printk(" --> ");
+                printk(KERN_CONT " --> ");
                 __print_lock_name(unsafe_class);
-                printk("\n\n");
+                printk(KERN_CONT "\n\n");
         }
 
         printk(" Possible interrupt unsafe locking scenario:\n\n");
         printk("       CPU0                    CPU1\n");
         printk("       ----                    ----\n");
         printk("  lock(");
         __print_lock_name(unsafe_class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("                               local_irq_disable();\n");
         printk("                               lock(");
         __print_lock_name(safe_class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("                               lock(");
         __print_lock_name(middle_class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("  <Interrupt>\n");
         printk("    lock(");
         __print_lock_name(safe_class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -1497,9 +1497,9 @@ print_bad_irq_dependency(struct task_struct *curr,
         print_lock(prev);
         printk("which would create a new lock dependency:\n");
         print_lock_name(hlock_class(prev));
-        printk(" ->");
+        printk(KERN_CONT " ->");
         print_lock_name(hlock_class(next));
-        printk("\n");
+        printk(KERN_CONT "\n");
 
         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
                 irqclass);
@@ -1521,8 +1521,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 
         lockdep_print_held_locks(curr);
 
-        printk("\nthe dependencies between %s-irq-safe lock", irqclass);
-        printk(" and the holding lock:\n");
+        printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
         if (!save_trace(&prev_root->trace))
                 return 0;
         print_shortest_lock_dependencies(backwards_entry, prev_root);
@@ -1694,10 +1693,10 @@ print_deadlock_scenario(struct held_lock *nxt,
         printk("       ----\n");
         printk("  lock(");
         __print_lock_name(prev);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("  lock(");
         __print_lock_name(next);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("\n *** DEADLOCK ***\n\n");
         printk(" May be due to missing lock nesting notation\n\n");
 }
@@ -1891,9 +1890,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                 graph_unlock();
                 printk("\n new dependency: ");
                 print_lock_name(hlock_class(prev));
-                printk(" => ");
+                printk(KERN_CONT " => ");
                 print_lock_name(hlock_class(next));
-                printk("\n");
+                printk(KERN_CONT "\n");
                 dump_stack();
                 return graph_lock();
         }
@@ -2343,11 +2342,11 @@ print_usage_bug_scenario(struct held_lock *lock)
         printk("       ----\n");
         printk("  lock(");
         __print_lock_name(class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("  <Interrupt>\n");
         printk("    lock(");
         __print_lock_name(class);
-        printk(");\n");
+        printk(KERN_CONT ");\n");
         printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -2522,14 +2521,18 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 void print_irqtrace_events(struct task_struct *curr)
 {
         printk("irq event stamp: %u\n", curr->irq_events);
-        printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
-        print_ip_sym(curr->hardirq_enable_ip);
-        printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
-        print_ip_sym(curr->hardirq_disable_ip);
-        printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
-        print_ip_sym(curr->softirq_enable_ip);
-        printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
-        print_ip_sym(curr->softirq_disable_ip);
+        printk("hardirqs last  enabled at (%u): [<%p>] %pS\n",
+                curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
+                (void *)curr->hardirq_enable_ip);
+        printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
+                curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
+                (void *)curr->hardirq_disable_ip);
+        printk("softirqs last  enabled at (%u): [<%p>] %pS\n",
+                curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
+                (void *)curr->softirq_enable_ip);
+        printk("softirqs last disabled at (%u): [<%p>] %pS\n",
+                curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
+                (void *)curr->softirq_disable_ip);
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -3235,8 +3238,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
         if (very_verbose(class)) {
                 printk("\nacquire class [%p] %s", class->key, class->name);
                 if (class->name_version > 1)
-                        printk("#%d", class->name_version);
-                printk("\n");
+                        printk(KERN_CONT "#%d", class->name_version);
+                printk(KERN_CONT "\n");
                 dump_stack();
         }
 
@@ -3378,7 +3381,7 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
         printk("%s/%d is trying to release lock (",
                 curr->comm, task_pid_nr(curr));
         print_lockdep_cache(lock);
-        printk(") at:\n");
+        printk(KERN_CONT ") at:\n");
         print_ip_sym(ip);
         printk("but there are no more locks to release!\n");
         printk("\nother info that might help us debug this:\n");
@@ -3871,7 +3874,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
         printk("%s/%d is trying to contend lock (",
                 curr->comm, task_pid_nr(curr));
         print_lockdep_cache(lock);
-        printk(") at:\n");
+        printk(KERN_CONT ") at:\n");
         print_ip_sym(ip);
         printk("but there are no locks held!\n");
         printk("\nother info that might help us debug this:\n");