
Commit a5da3c3

Merge pull request #117 from intel/vm_exit_optimizition
avoid unnecessary load_vmcs calls in VM exit
2 parents 56f1ee2 + 96af3d2
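
The optimization pattern behind this merge, in brief: vmread()/vmwrite() are only valid while the vCPU's VMCS is loaded on the current CPU, so exit handlers that touched guest state had to bracket every access with a load_vmcs()/put_vmcs() pair. After this change, cpu_vmx_execute() caches the needed exit fields once per VM exit (while the VMCS is still loaded), exit handlers mutate only the cached copies and set per-field dirty flags, and load_dirty_vmcs_fields() flushes those flags right before the next VM entry. A minimal standalone sketch of that pattern (simplified names, not the actual HAXM code):

#include <stdint.h>

struct cached_vcpu {
    uint64_t rip;
    uint32_t exit_instr_length;   /* cached at VM exit via vmread() */
    unsigned rip_dirty : 1;
};

/* Exit handler: touches only the cache; no load_vmcs()/put_vmcs() pair. */
static void advance_rip_cached(struct cached_vcpu *v)
{
    v->rip += v->exit_instr_length;
    v->rip_dirty = 1;
}

/* Runs right before VM entry, when the VMCS is guaranteed to be loaded. */
static void flush_dirty_fields(struct cached_vcpu *v)
{
    if (v->rip_dirty) {
        /* vmwrite(vcpu, GUEST_RIP, v->rip); -- the real code writes the VMCS */
        v->rip_dirty = 0;
    }
}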

File tree

4 files changed: +84 -66 lines


core/cpu.c

Lines changed: 4 additions & 0 deletions
@@ -480,6 +480,10 @@ int cpu_vmx_execute(struct vcpu_t *vcpu, struct hax_tunnel *htun)
                 vcpu, VM_EXIT_INFO_IDT_VECTORING);
         vmx(vcpu, exit_instr_length) = vmread(
                 vcpu, VM_EXIT_INFO_INSTRUCTION_LENGTH);
+        vmx(vcpu, exit_gpa) = vmread(
+                vcpu, VM_EXIT_INFO_GUEST_PHYSICAL_ADDRESS);
+        vmx(vcpu, interruptibility_state).raw = vmread(
+                vcpu, GUEST_INTERRUPTIBILITY);
 
         state->_rflags = vmread(vcpu, GUEST_RFLAGS);
         state->_rsp = vmread(vcpu, GUEST_RSP);

core/include/vcpu.h

Lines changed: 8 additions & 1 deletion
@@ -98,6 +98,8 @@ struct vcpu_vmx_data {
     exit_reason_t exit_reason;
     exit_qualification_t exit_qualification;
     interruptibility_state_t interruptibility_state;
+
+    uint64_t exit_gpa;
 };
 
 /* Information saved by instruction decoder and used by post-MMIO handler */
@@ -193,7 +195,12 @@ struct vcpu_t {
     uint64_t vmcs_pending_guest_cr3 : 1;
     uint64_t debug_control_dirty : 1;
     uint64_t dr_dirty : 1;
-    uint64_t padding : 51;
+    uint64_t rflags_dirty : 1;
+    uint64_t rip_dirty : 1;
+    uint64_t fs_base_dirty : 1;
+    uint64_t interruptibility_dirty : 1;
+    uint64_t pcpu_ctls_dirty : 1;
+    uint64_t padding : 46;
 };
 
 /* For TSC offseting feature*/
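
A note on the bitfield arithmetic: padding shrinks from 51 to 46 bits because the five new 1-bit dirty flags are carved out of it, so the flag word (and struct vcpu_t) stays the same size. A standalone illustration (hypothetical struct, not HAXM code; the 13-bit member stands in for the pre-existing flags up through dr_dirty, since 13 + 51 = 64 in the old layout):

#include <assert.h>
#include <stdint.h>

struct flag_word {
    uint64_t preexisting_flags : 13;  /* ... through dr_dirty */
    uint64_t rflags_dirty : 1;
    uint64_t rip_dirty : 1;
    uint64_t fs_base_dirty : 1;
    uint64_t interruptibility_dirty : 1;
    uint64_t pcpu_ctls_dirty : 1;
    uint64_t padding : 46;            /* 13 + 5 + 46 == 64 */
};

static_assert(sizeof(struct flag_word) == sizeof(uint64_t),
              "flags must still pack into a single 64-bit word");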

core/intr_exc.c

Lines changed: 1 addition & 1 deletion
@@ -125,7 +125,7 @@ uint hax_intr_is_blocked(struct vcpu_t *vcpu)
     if (!(state->_eflags & EFLAGS_IF))
         return 1;
 
-    intr_status = vmread(vcpu, GUEST_INTERRUPTIBILITY);
+    intr_status = vmx(vcpu, interruptibility_state).raw;
     if (intr_status & 3)
         return 1;
     return 0;

core/vcpu.c

Lines changed: 71 additions & 64 deletions
@@ -1055,22 +1055,50 @@ static inline bool is_thread_migrated(struct vcpu_t *vcpu)
     return (vcpu->cpu_id != vcpu->prev_cpu_id);
 }
 
-static void load_guest_rip_rflags(struct vcpu_t *vcpu)
+static void load_dirty_vmcs_fields(struct vcpu_t *vcpu)
 {
-    // TODO vmcs rip rflags could be loaded here in dirty case
     struct vcpu_state_t *state = vcpu->state;
 
+    // rflags
     if (vcpu->debug_control_dirty) {
-        state->_rflags = vmread(vcpu, GUEST_RFLAGS);
         // Single-stepping
         if (vcpu->debug_control & HAX_DEBUG_STEP) {
             state->_rflags |= EFLAGS_TF;
         } else {
             state->_rflags &= ~EFLAGS_TF;
         }
-        vmwrite(vcpu, GUEST_RFLAGS, state->_rflags);
+        vcpu->rflags_dirty = 1;
         vcpu->debug_control_dirty = 0;
     }
+    if (vcpu->rflags_dirty) {
+        vmwrite(vcpu, GUEST_RFLAGS, state->_rflags);
+        vcpu->rflags_dirty = 0;
+    }
+
+    // interruptibility
+    if (vcpu->interruptibility_dirty) {
+        vmwrite(vcpu, GUEST_INTERRUPTIBILITY,
+                vmx(vcpu, interruptibility_state).raw);
+        vcpu->interruptibility_dirty = 0;
+    }
+
+    // rip
+    if (vcpu->rip_dirty) {
+        vmwrite(vcpu, GUEST_RIP, state->_rip);
+        vcpu->rip_dirty = 0;
+    }
+
+    // primary cpu ctrl
+    if (vcpu->pcpu_ctls_dirty) {
+        vmwrite(vcpu, VMX_PRIMARY_PROCESSOR_CONTROLS, vmx(vcpu, pcpu_ctls));
+        vcpu->pcpu_ctls_dirty = 0;
+    }
+
+    // FS base
+    if (vcpu->fs_base_dirty) {
+        vmwrite(vcpu, GUEST_FS_BASE, vcpu->state->_fs.base);
+        vcpu->fs_base_dirty = 0;
+    }
 }
 
 static inline bool is_guest_dr_dirty(struct vcpu_t *vcpu)
@@ -1511,7 +1539,7 @@ void vcpu_load_guest_state(struct vcpu_t *vcpu)
 
     load_guest_dr(vcpu);
 
-    load_guest_rip_rflags(vcpu);
+    load_dirty_vmcs_fields(vcpu);
 }
 
 /*
@@ -1711,55 +1739,16 @@ int vtlb_active(struct vcpu_t *vcpu)
 static void advance_rip(struct vcpu_t *vcpu)
 {
     struct vcpu_state_t *state = vcpu->state;
-    preempt_flag flags;
-    uint32_t interruptibility = vmread(vcpu, GUEST_INTERRUPTIBILITY);
-    uint32_t vmcs_err = 0;
-    if ((vmcs_err = load_vmcs(vcpu, &flags))) {
-        hax_panic_vcpu(vcpu, "load_vmcs while advance_rip: %x", vmcs_err);
-        hax_panic_log(vcpu);
-        return;
-    }
+    uint32_t interruptibility = vmx(vcpu, interruptibility_state).raw;
 
     if (interruptibility & 3u) {
         interruptibility &= ~3u;
-        vmwrite(vcpu, GUEST_INTERRUPTIBILITY, interruptibility);
+        vmx(vcpu, interruptibility_state).raw = interruptibility;
+        vcpu->interruptibility_dirty = 1;
     }
-    state->_rip += vmread(vcpu, VM_EXIT_INFO_INSTRUCTION_LENGTH);
-    vmwrite(vcpu, GUEST_RIP, state->_rip);
 
-    if ((vmcs_err = put_vmcs(vcpu, &flags))) {
-        hax_panic_vcpu(vcpu, "put_vmcs while advance_rip: %x\n", vmcs_err);
-        hax_panic_log(vcpu);
-    }
-}
-
-static void advance_rip_step(struct vcpu_t *vcpu, int step)
-{
-    struct vcpu_state_t *state = vcpu->state;
-    preempt_flag flags;
-    uint32_t interruptibility = vmread(vcpu, GUEST_INTERRUPTIBILITY);
-    uint32_t vmcs_err = 0;
-    if ((vmcs_err = load_vmcs(vcpu, &flags))) {
-        hax_panic_vcpu(vcpu, "load_vmcs while advance_rip_step: %x\n",
-                       vmcs_err);
-        hax_panic_log(vcpu);
-        return;
-    }
-
-    if (interruptibility & 3u) {
-        interruptibility &= ~3u;
-        vmwrite(vcpu, GUEST_INTERRUPTIBILITY, interruptibility);
-    }
-    if (step) {
-        state->_rip += step;
-        vmwrite(vcpu, GUEST_RIP, state->_rip);
-    }
-
-    if ((vmcs_err = put_vmcs(vcpu, &flags))) {
-        hax_panic_vcpu(vcpu, "put_vmcs() while advance_rip_step: %x\n",
-                       vmcs_err);
-        hax_panic_log(vcpu);
-    }
+    state->_rip += vmx(vcpu, exit_instr_length);
+    vcpu->rip_dirty = 1;
 }
 
 void vcpu_vmread_all(struct vcpu_t *vcpu)
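
Why the rewritten advance_rip() is safe without touching the VMCS: cpu_vmx_execute() now vmreads the instruction length and interruptibility state on every exit (see the core/cpu.c hunk above), so the handler can consume the cached copies and defer both writes. A condensed sketch of that data flow (hypothetical names, not the HAXM signatures; bits 0-1 of the interruptibility field are the STI and MOV-SS blocking bits per the Intel SDM):

#include <stdint.h>

struct exit_cache {            /* refreshed on every VM exit */
    uint32_t instr_length;
    uint32_t interruptibility; /* raw GUEST_INTERRUPTIBILITY value */
};

static void advance_rip_sketch(struct exit_cache *c, uint64_t *rip,
                               int *rip_dirty, int *intr_dirty)
{
    /* Clear STI/MOV-SS blocking in the cached copy; flag it for flush. */
    if (c->interruptibility & 3u) {
        c->interruptibility &= ~3u;
        *intr_dirty = 1;
    }
    /* Bump RIP past the exiting instruction; the vmwrite happens at entry. */
    *rip += c->instr_length;
    *rip_dirty = 1;
}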
@@ -1779,14 +1768,19 @@ void vcpu_vmread_all(struct vcpu_t *vcpu)
         return;
     }
 
-    state->_rip = vmread(vcpu, GUEST_RIP);
-    state->_rflags = vmread(vcpu, GUEST_RFLAGS);
+    if (!vcpu->rip_dirty)
+        state->_rip = vmread(vcpu, GUEST_RIP);
+
+    if (!vcpu->rflags_dirty)
+        state->_rflags = vmread(vcpu, GUEST_RFLAGS);
+
     state->_rsp = vmread(vcpu, GUEST_RSP);
 
     VMREAD_SEG(vcpu, CS, state->_cs);
     VMREAD_SEG(vcpu, DS, state->_ds);
     VMREAD_SEG(vcpu, ES, state->_es);
-    VMREAD_SEG(vcpu, FS, state->_fs);
+    if (!vcpu->fs_base_dirty)
+        VMREAD_SEG(vcpu, FS, state->_fs);
     VMREAD_SEG(vcpu, GS, state->_gs);
     VMREAD_SEG(vcpu, SS, state->_ss);
     VMREAD_SEG(vcpu, LDTR, state->_ldt);
@@ -1810,13 +1804,16 @@ void vcpu_vmwrite_all(struct vcpu_t *vcpu, int force_tlb_flush)
     struct vcpu_state_t *state = vcpu->state;
 
     vmwrite(vcpu, GUEST_RIP, state->_rip);
+    vcpu->rip_dirty = 0;
     vmwrite(vcpu, GUEST_RFLAGS, state->_rflags);
+    vcpu->rflags_dirty = 0;
     vmwrite(vcpu, GUEST_RSP, state->_rsp);
 
     VMWRITE_SEG(vcpu, CS, state->_cs);
     VMWRITE_SEG(vcpu, DS, state->_ds);
     VMWRITE_SEG(vcpu, ES, state->_es);
     VMWRITE_SEG(vcpu, FS, state->_fs);
+    vcpu->fs_base_dirty = 0;
     VMWRITE_SEG(vcpu, GS, state->_gs);
     VMWRITE_SEG(vcpu, SS, state->_ss);
     VMWRITE_SEG(vcpu, LDTR, state->_ldt);
@@ -2015,8 +2012,8 @@ static void vmwrite_cr(struct vcpu_t *vcpu)
         entry_ctls |= ENTRY_CONTROL_LONG_MODE_GUEST;
     }
     if (pcpu_ctls != vmx(vcpu, pcpu_ctls)) {
-        vmwrite(vcpu, VMX_PRIMARY_PROCESSOR_CONTROLS,
-                vmx(vcpu, pcpu_ctls) = pcpu_ctls);
+        vmx(vcpu, pcpu_ctls) = pcpu_ctls;
+        vcpu->pcpu_ctls_dirty = 1;
     }
     if (scpu_ctls != vmx(vcpu, scpu_ctls)) {
         vmwrite(vcpu, VMX_SECONDARY_PROCESSOR_CONTROLS,
@@ -2198,7 +2195,7 @@ void vcpu_write_rflags(void *obj, uint64_t value)
 {
     struct vcpu_t *vcpu = obj;
     vcpu->state->_rflags = value;
-    vmwrite(vcpu, GUEST_RFLAGS, vcpu->state->_rflags);
+    vcpu->rflags_dirty = 1;
 }
 
 static uint64_t vcpu_get_segment_base(void *obj, uint32_t segment)
@@ -2235,7 +2232,7 @@ static em_status_t vcpu_read_memory(void *obj, uint64_t ea, uint64_t *value,
     uint64_t pa;
 
     if (flags & EM_OPS_NO_TRANSLATION) {
-        pa = vmread(vcpu, VM_EXIT_INFO_GUEST_PHYSICAL_ADDRESS);
+        pa = vmx(vcpu, exit_gpa);
     } else {
         vcpu_translate(vcpu, ea, 0, &pa, NULL, false);
     }
@@ -2273,7 +2270,7 @@ static em_status_t vcpu_write_memory(void *obj, uint64_t ea, uint64_t *value,
     uint64_t pa;
 
     if (flags & EM_OPS_NO_TRANSLATION) {
-        pa = vmread(vcpu, VM_EXIT_INFO_GUEST_PHYSICAL_ADDRESS);
+        pa = vmx(vcpu, exit_gpa);
     } else {
         vcpu_translate(vcpu, ea, 0, &pa, NULL, false);
     }
@@ -2452,7 +2449,7 @@ static int exit_interrupt_window(struct vcpu_t *vcpu, struct hax_tunnel *htun)
             vmx(vcpu, exit_reason).basic_reason == VMX_EXIT_PENDING_INTERRUPT
             ? ~INTERRUPT_WINDOW_EXITING : ~NMI_WINDOW_EXITING;
 
-    vmwrite(vcpu, VMX_PRIMARY_PROCESSOR_CONTROLS, vmx(vcpu, pcpu_ctls));
+    vcpu->pcpu_ctls_dirty = 1;
     htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason;
     return HAX_RESUME;
 }
@@ -3296,7 +3293,10 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val)
             break;
         }
         case IA32_FS_BASE: {
-            *val = vmread(vcpu, GUEST_FS_BASE);
+            if (vcpu->fs_base_dirty)
+                *val = vcpu->state->_fs.base;
+            else
+                *val = vmread(vcpu, GUEST_FS_BASE);
             break;
         }
         case IA32_GS_BASE: {
@@ -3567,7 +3567,14 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val)
             break;
         }
         case IA32_FS_BASE: {
-            vmwrite(vcpu, GUEST_FS_BASE, val);
+            /*
+             * While the Android emulator is running, there are a lot of
+             * FS_BASE MSR writes. To avoid unnecessary VMCS loading/putting,
+             * don't write the value to the VMCS until right before the next
+             * VM entry, when the VMCS region has been loaded into memory.
+             */
+            vcpu->state->_fs.base = val;
+            vcpu->fs_base_dirty = 1;
             break;
         }
         case IA32_GS_BASE: {
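
The FS_BASE handling is effectively a write-back cache, and the matching read path above (handle_msr_read) checks fs_base_dirty first so the guest always observes its own latest write. A minimal sketch of that read-your-writes rule (hypothetical helper names, not the HAXM API):

#include <stdint.h>

struct fs_base_cache {
    uint64_t value;   /* pending guest FS base */
    int dirty;        /* 1 => value not yet written to the VMCS */
};

static void msr_write_fs_base(struct fs_base_cache *c, uint64_t val)
{
    c->value = val;   /* deferred; flushed just before the next VM entry */
    c->dirty = 1;
}

static uint64_t msr_read_fs_base(const struct fs_base_cache *c,
                                 uint64_t (*vmcs_read_fs_base)(void))
{
    /* Serve from the cache when dirty, else fall back to the VMCS. */
    return c->dirty ? c->value : vmcs_read_fs_base();
}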
@@ -3672,7 +3679,7 @@ static int exit_ept_misconfiguration(struct vcpu_t *vcpu,
 
     htun->_exit_reason = vmx(vcpu, exit_reason).basic_reason;
 #ifdef CONFIG_HAX_EPT2
-    gpa = vmread(vcpu, VM_EXIT_INFO_GUEST_PHYSICAL_ADDRESS);
+    gpa = vmx(vcpu, exit_gpa);
     ret = ept_handle_misconfiguration(&vcpu->vm->gpa_space, &vcpu->vm->ept_tree,
                                       gpa);
     if (ret > 0) {
@@ -3704,7 +3711,7 @@ static int exit_ept_violation(struct vcpu_t *vcpu, struct hax_tunnel *htun)
         return HAX_RESUME;
     }
 
-    gpa = vmread(vcpu, VM_EXIT_INFO_GUEST_PHYSICAL_ADDRESS);
+    gpa = vmx(vcpu, exit_gpa);
 
 #ifdef CONFIG_HAX_EPT2
     ret = ept_handle_access_violation(&vcpu->vm->gpa_space, &vcpu->vm->ept_tree,
@@ -3862,11 +3869,11 @@ int vcpu_set_regs(struct vcpu_t *vcpu, struct vcpu_state_t *ustate)
 
     if (state->_rip != ustate->_rip) {
         state->_rip = ustate->_rip;
-        vmwrite(vcpu, GUEST_RIP, state->_rip);
+        vcpu->rip_dirty = 1;
     }
     if (state->_rflags != ustate->_rflags) {
         state->_rflags = ustate->_rflags;
-        vmwrite(vcpu, GUEST_RFLAGS, state->_rflags);
+        vcpu->rflags_dirty = 1;
     }
     if (rsp_dirty) {
         state->_rsp = ustate->_rsp;
