@@ -1229,6 +1229,9 @@ void vcpu_save_host_state(struct vcpu_t *vcpu)
1229
1229
vmwrite (vcpu , HOST_EFER , hstate -> _efer );
1230
1230
}
1231
1231
1232
+ hstate -> _pat = ia32_rdmsr (IA32_CR_PAT );
1233
+ vmwrite (vcpu , HOST_PAT , hstate -> _pat );
1234
+
1232
1235
#ifdef HAX_ARCH_X86_64
1233
1236
vmwrite (vcpu , HOST_CS_SELECTOR , get_kernel_cs ());
1234
1237
#else
@@ -1395,15 +1398,15 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
1395
1398
1396
1399
#ifdef HAX_ARCH_X86_64
1397
1400
exit_ctls = EXIT_CONTROL_HOST_ADDR_SPACE_SIZE | EXIT_CONTROL_LOAD_EFER |
1398
- EXIT_CONTROL_SAVE_DEBUG_CONTROLS ;
1401
+ EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT ;
1399
1402
#endif
1400
1403
1401
1404
#ifdef HAX_ARCH_X86_32
1402
1405
if (is_compatible ()) {
1403
1406
exit_ctls = EXIT_CONTROL_HOST_ADDR_SPACE_SIZE | EXIT_CONTROL_LOAD_EFER |
1404
- EXIT_CONTROL_SAVE_DEBUG_CONTROLS ;
1407
+ EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT ;
1405
1408
} else {
1406
- exit_ctls = EXIT_CONTROL_SAVE_DEBUG_CONTROLS ;
1409
+ exit_ctls = EXIT_CONTROL_SAVE_DEBUG_CONTROLS | EXIT_CONTROL_LOAD_PAT ;
1407
1410
}
1408
1411
#endif
1409
1412
@@ -1474,6 +1477,8 @@ static void fill_common_vmcs(struct vcpu_t *vcpu)
1474
1477
if (exit_ctls & EXIT_CONTROL_LOAD_EFER ) {
1475
1478
vmwrite (vcpu , HOST_EFER , ia32_rdmsr (IA32_EFER ));
1476
1479
}
1480
+ vmwrite (vcpu , HOST_PAT , ia32_rdmsr (IA32_CR_PAT ));
1481
+ vmwrite (vcpu , GUEST_PAT , vcpu -> cr_pat );
1477
1482
1478
1483
WRITE_CONTROLS (vcpu , VMX_ENTRY_CONTROLS , entry_ctls );
1479
1484
@@ -2070,6 +2075,8 @@ static void vmwrite_cr(struct vcpu_t *vcpu)
2070
2075
entry_ctls &= ~ENTRY_CONTROL_LOAD_EFER ;
2071
2076
}
2072
2077
2078
+ entry_ctls |= ENTRY_CONTROL_LOAD_PAT ;
2079
+
2073
2080
if (pcpu_ctls != vmx (vcpu , pcpu_ctls )) {
2074
2081
vmx (vcpu , pcpu_ctls ) = pcpu_ctls ;
2075
2082
vcpu -> pcpu_ctls_dirty = 1 ;
@@ -2575,7 +2582,7 @@ static void handle_cpuid_virtual(struct vcpu_t *vcpu, uint32_t a, uint32_t c)
2575
2582
uint8_t physical_address_size ;
2576
2583
2577
2584
static uint32_t cpuid_1_features_edx =
2578
- // pat is disabled!
2585
+ FEATURE ( PAT ) |
2579
2586
FEATURE (FPU ) |
2580
2587
FEATURE (VME ) |
2581
2588
FEATURE (DE ) |
@@ -3605,6 +3612,15 @@ static int misc_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val)
3605
3612
return 1 ;
3606
3613
}
3607
3614
3615
/*
 * pat_valid - check whether a value is a legal IA32_PAT MSR setting.
 *
 * The IA32_PAT MSR holds eight one-byte page-attribute fields
 * (PA0..PA7). Each field must contain a valid memory-type encoding:
 * 0 (UC), 1 (WC), 4 (WT), 5 (WP), 6 (WB) or 7 (UC-). Encodings 2 and
 * 3 are reserved, as is any byte with bits 3..7 set. Writing a
 * reserved encoding to the PAT raises #GP, so callers use this to
 * validate guest writes to IA32_CR_PAT before committing them.
 *
 * Returns true iff every byte of |val| is a valid PAT entry.
 */
static inline bool pat_valid(uint64_t val)
{
    /* Reject any byte with bits 3..7 set (every entry must be <= 7). */
    if (val & 0xF8F8F8F8F8F8F8F8ULL)
        return false;

    /*
     * The remaining entries are all in 0..7; exclude the reserved
     * encodings 2 and 3. Those are exactly the bytes that have bit 1
     * set but bit 2 clear, so OR-ing each byte's bit 1 shifted up into
     * bit 2 changes the value only when a reserved entry is present.
     */
    return (val | ((val & 0x0202020202020202ULL) << 1)) == val;
}
3623
+
3608
3624
static int handle_msr_write (struct vcpu_t * vcpu , uint32_t msr , uint64_t val ,
3609
3625
bool by_host )
3610
3626
{
@@ -3763,7 +3779,15 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val,
3763
3779
break ;
3764
3780
}
3765
3781
case IA32_CR_PAT : {
3782
+ //Attempting to write an undefined memory type encoding into the PAT causes a
3783
+ //general-protection (#GP) exception to be generated.
3784
+ if (!pat_valid (val )) {
3785
+ r = 1 ;
3786
+ break ;
3787
+ }
3788
+
3766
3789
vcpu -> cr_pat = val ;
3790
+ vmwrite (vcpu , GUEST_PAT , vcpu -> cr_pat );
3767
3791
break ;
3768
3792
}
3769
3793
case IA32_MTRR_DEF_TYPE : {
0 commit comments