@@ -96,6 +96,9 @@ static void __init its_update_mitigation(void);
 static void __init its_apply_mitigation(void);
 static void __init tsa_select_mitigation(void);
 static void __init tsa_apply_mitigation(void);
+static void __init vmscape_select_mitigation(void);
+static void __init vmscape_update_mitigation(void);
+static void __init vmscape_apply_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -270,6 +273,7 @@ void __init cpu_select_mitigations(void)
 	its_select_mitigation();
 	bhi_select_mitigation();
 	tsa_select_mitigation();
+	vmscape_select_mitigation();
 
 	/*
 	 * After mitigations are selected, some may need to update their
@@ -301,6 +305,7 @@ void __init cpu_select_mitigations(void)
 	bhi_update_mitigation();
 	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
 	srso_update_mitigation();
+	vmscape_update_mitigation();
 
 	spectre_v1_apply_mitigation();
 	spectre_v2_apply_mitigation();
@@ -318,6 +323,7 @@ void __init cpu_select_mitigations(void)
 	its_apply_mitigation();
 	bhi_apply_mitigation();
 	tsa_apply_mitigation();
+	vmscape_apply_mitigation();
 }
 
 /*
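
Annotation (not patch content): the hooks above follow bugs.c's select/update/apply phasing, and vmscape_update_mitigation() is placed after retbleed_update_mitigation() and srso_update_mitigation() because it reads their final decisions (see the vmscape_update_mitigation() hunk below). A minimal sketch of that ordering dependency, with illustrative comments:

	/* Sketch of the phase ordering; illustrative, not patch code. */
	vmscape_select_mitigation();	/* phase 1: tentative choice */
	...
	retbleed_update_mitigation();	/* phase 2: may settle on IBPB */
	srso_update_mitigation();	/* phase 2: may settle on IBPB on VMEXIT */
	vmscape_update_mitigation();	/* phase 2: sees their final state */
	...
	vmscape_apply_mitigation();	/* phase 3: commit the CPU capability */
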
@@ -3322,6 +3328,77 @@ static void __init srso_apply_mitigation(void)
 	}
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"VMSCAPE: " fmt
+
+enum vmscape_mitigations {
+	VMSCAPE_MITIGATION_NONE,
+	VMSCAPE_MITIGATION_AUTO,
+	VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
+	VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+static const char * const vmscape_strings[] = {
+	[VMSCAPE_MITIGATION_NONE]		= "Vulnerable",
+	/* [VMSCAPE_MITIGATION_AUTO] */
+	[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]	= "Mitigation: IBPB before exit to userspace",
+	[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT",
+};
+
+static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
+
+static int __init vmscape_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "off")) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+	} else if (!strcmp(str, "ibpb")) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+	} else if (!strcmp(str, "force")) {
+		setup_force_cpu_bug(X86_BUG_VMSCAPE);
+		vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
+	} else {
+		pr_err("Ignoring unknown vmscape=%s option.\n", str);
+	}
+
+	return 0;
+}
+early_param("vmscape", vmscape_parse_cmdline);
+
+static void __init vmscape_select_mitigation(void)
+{
+	if (cpu_mitigations_off() ||
+	    !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
+	    !boot_cpu_has(X86_FEATURE_IBPB)) {
+		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+		return;
+	}
+
+	if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+}
+
+static void __init vmscape_update_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
+		return;
+
+	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
+	    srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
+		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
+
+	pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
+static void __init vmscape_apply_mitigation(void)
+{
+	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
+		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)	fmt
 
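
Annotation (not patch content): the early_param() handler above gives the new kernel command-line switch three recognized values; anything else logs "Ignoring unknown vmscape=..." and keeps the compiled-in default. Grounded in the strcmp() branches above:

	vmscape=off	# VMSCAPE_MITIGATION_NONE: no mitigation
	vmscape=ibpb	# VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER: IBPB before exiting to userspace
	vmscape=force	# force-set X86_BUG_VMSCAPE and keep AUTO selection
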
@@ -3570,6 +3647,11 @@ static ssize_t tsa_show_state(char *buf)
 	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
 }
 
+static ssize_t vmscape_show_state(char *buf)
+{
+	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -3636,6 +3718,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 	case X86_BUG_TSA:
 		return tsa_show_state(buf);
 
+	case X86_BUG_VMSCAPE:
+		return vmscape_show_state(buf);
+
 	default:
 		break;
 	}
@@ -3727,6 +3812,11 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
 }
+
+ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
+}
 #endif
 
 void __warn_thunk(void)
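
Annotation (not patch content): cpu_show_vmscape() follows the cpu_show_<bug>() convention, so, assuming the matching sysfs attribute is registered in drivers/base/cpu.c (which this diff does not show), the mitigation state becomes readable after boot:

	$ cat /sys/devices/system/cpu/vulnerabilities/vmscape
	Mitigation: IBPB before exit to userspace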