Commit 5c99a68

Merge tag 'kvmarm-fixes-6.13-3' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 changes for 6.13, part #3:

- Always check the page state in hyp_ack_unshare()
- Align the set_id_regs selftest with the fact that the ASIDBITS field is RO
- Various vPMU fixes for bugs that only affect nested virt
2 parents 71b7bf1 + e96d8b8 commit 5c99a68

5 files changed (+62, -69)

arch/arm64/kvm/hyp/nvhe/mem_protect.c (-3)

@@ -783,9 +783,6 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
 	if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
 		return -EBUSY;
 
-	if (__hyp_ack_skip_pgtable_check(tx))
-		return 0;
-
 	return __hyp_check_page_state_range(addr, size,
 					    PKVM_PAGE_SHARED_BORROWED);
 }
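The behavioural point of this hunk is that acknowledging an unshare must now always validate the page state; the removed __hyp_ack_skip_pgtable_check() fast path let some initiators return success without that check. A minimal self-contained model of the resulting invariant (all names below are hypothetical stand-ins for illustration, not pKVM code):

/* Toy model: an unshare is acknowledged only if the page is still in the
 * SHARED_BORROWED state; no caller bypasses the check anymore. */
#include <errno.h>
#include <stdio.h>

enum page_state { PAGE_OWNED, PAGE_SHARED_BORROWED };

static int ack_unshare(enum page_state state, int hyp_refcount)
{
	if (hyp_refcount > 0)
		return -EBUSY;	/* hyp still holds a reference to the page */

	/* The state check is now unconditional; previously some transitions
	 * returned 0 here without inspecting the page-table state. */
	return state == PAGE_SHARED_BORROWED ? 0 : -EPERM;
}

int main(void)
{
	printf("%d\n", ack_unshare(PAGE_SHARED_BORROWED, 0));	/* 0       */
	printf("%d\n", ack_unshare(PAGE_SHARED_BORROWED, 1));	/* -EBUSY  */
	printf("%d\n", ack_unshare(PAGE_OWNED, 0));		/* -EPERM  */
	return 0;
}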

arch/arm64/kvm/pmu-emul.c (+35, -54)

@@ -24,6 +24,7 @@ static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
+static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
 
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
@@ -327,65 +328,44 @@ u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
 	return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
 }
 
-/**
- * kvm_pmu_enable_counter_mask - enable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENSET register
- *
- * Call perf_event_enable to start counting the perf event
- */
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc)
 {
-	int i;
-	if (!kvm_vcpu_has_pmu(vcpu))
-		return;
-
-	if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val)
+	if (!pmc->perf_event) {
+		kvm_pmu_create_perf_event(pmc);
 		return;
+	}
 
-	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
-
-		if (!(val & BIT(i)))
-			continue;
-
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+	perf_event_enable(pmc->perf_event);
+	if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+		kvm_debug("fail to enable perf event\n");
+}
 
-		if (!pmc->perf_event) {
-			kvm_pmu_create_perf_event(pmc);
-		} else {
-			perf_event_enable(pmc->perf_event);
-			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
-				kvm_debug("fail to enable perf event\n");
-		}
-	}
+static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event)
+		perf_event_disable(pmc->perf_event);
 }
 
-/**
- * kvm_pmu_disable_counter_mask - disable selected PMU counters
- * @vcpu: The vcpu pointer
- * @val: the value guest writes to PMCNTENCLR register
- *
- * Call perf_event_disable to stop counting the perf event
- */
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
 {
 	int i;
 
 	if (!kvm_vcpu_has_pmu(vcpu) || !val)
 		return;
 
 	for (i = 0; i < KVM_ARMV8_PMU_MAX_COUNTERS; i++) {
-		struct kvm_pmc *pmc;
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
 
 		if (!(val & BIT(i)))
 			continue;
 
-		pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
-
-		if (pmc->perf_event)
-			perf_event_disable(pmc->perf_event);
+		if (kvm_pmu_counter_is_enabled(pmc))
+			kvm_pmc_enable_perf_event(pmc);
+		else
+			kvm_pmc_disable_perf_event(pmc);
 	}
+
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 /*
@@ -626,27 +606,28 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 	if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
 		val &= ~ARMV8_PMU_PMCR_LP;
 
+	/* Request a reload of the PMU to enable/disable affected counters */
+	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) ^ val) & ARMV8_PMU_PMCR_E)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
 	/* The reset bits don't indicate any state, and shouldn't be saved. */
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val & ~(ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_P);
 
-	if (val & ARMV8_PMU_PMCR_E) {
-		kvm_pmu_enable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	} else {
-		kvm_pmu_disable_counter_mask(vcpu,
-		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
-	}
-
 	if (val & ARMV8_PMU_PMCR_C)
 		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
 
 	if (val & ARMV8_PMU_PMCR_P) {
-		unsigned long mask = kvm_pmu_accessible_counter_mask(vcpu);
-		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
+		/*
+		 * Unlike other PMU sysregs, the controls in PMCR_EL0 always apply
+		 * to the 'guest' range of counters and never the 'hyp' range.
+		 */
+		unsigned long mask = kvm_pmu_implemented_counter_mask(vcpu) &
+				     ~kvm_pmu_hyp_counter_mask(vcpu) &
+				     ~BIT(ARMV8_PMU_CYCLE_IDX);
+
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
-	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
@@ -910,11 +891,11 @@ void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu)
 {
 	u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
 
-	kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu));
-
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask;
 	__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask;
 	__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask;
+
+	kvm_pmu_reprogram_counter_mask(vcpu, mask);
 }
 
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
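The net effect of this refactor is that callers no longer pick between an enable path and a disable path: they hand kvm_pmu_reprogram_counter_mask() a mask of counters to reconcile, and each counter converges to whatever kvm_pmu_counter_is_enabled() says its state should be. A self-contained sketch of that convergence model follows; the types and the enablement predicate are simplified stand-ins (real enablement also accounts for nested-virt controls such as MDCR_EL2.HPME), not the kernel's code:

/* Simplified model of mask-driven counter reprogramming. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_COUNTERS 32

static bool event_running[MAX_COUNTERS];

/* Stand-in for kvm_pmu_counter_is_enabled(): a counter counts only when
 * the global enable bit is set and its PMCNTENSET bit is set. */
static bool counter_is_enabled(bool pmcr_e, uint64_t cntenset, int i)
{
	return pmcr_e && (cntenset & (UINT64_C(1) << i));
}

/* Mirror of the reprogram loop: walk only the bits in @val and converge
 * each selected counter to its desired state, in either direction. */
static void reprogram_counter_mask(bool pmcr_e, uint64_t cntenset, uint64_t val)
{
	for (int i = 0; i < MAX_COUNTERS; i++) {
		if (!(val & (UINT64_C(1) << i)))
			continue;
		event_running[i] = counter_is_enabled(pmcr_e, cntenset, i);
	}
}

int main(void)
{
	uint64_t cntenset = 0x5;	/* counters 0 and 2 requested */

	reprogram_counter_mask(true, cntenset, cntenset);
	printf("counter0 running: %d\n", event_running[0]);	/* 1 */

	/* Clearing the global enable and reprogramming the same mask now
	 * stops the counters, the case the old enable/disable split of
	 * kvm_pmu_{enable,disable}_counter_mask() handled inconsistently. */
	reprogram_counter_mask(false, cntenset, cntenset);
	printf("counter0 running: %d\n", event_running[0]);	/* 0 */
	return 0;
}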

arch/arm64/kvm/sys_regs.c (+25, -7)

@@ -1208,16 +1208,14 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	mask = kvm_pmu_accessible_counter_mask(vcpu);
 	if (p->is_write) {
 		val = p->regval & mask;
-		if (r->Op2 & 0x1) {
+		if (r->Op2 & 0x1)
 			/* accessing PMCNTENSET_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
-			kvm_pmu_enable_counter_mask(vcpu, val);
-			kvm_vcpu_pmu_restore_guest(vcpu);
-		} else {
+		else
 			/* accessing PMCNTENCLR_EL0 */
 			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
-			kvm_pmu_disable_counter_mask(vcpu, val);
-		}
+
+		kvm_pmu_reprogram_counter_mask(vcpu, val);
 	} else {
 		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
 	}
@@ -2450,6 +2448,26 @@ static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
 	return __el2_visibility(vcpu, rd, s1pie_visibility);
 }
 
+static bool access_mdcr(struct kvm_vcpu *vcpu,
+			struct sys_reg_params *p,
+			const struct sys_reg_desc *r)
+{
+	u64 old = __vcpu_sys_reg(vcpu, MDCR_EL2);
+
+	if (!access_rw(vcpu, p, r))
+		return false;
+
+	/*
+	 * Request a reload of the PMU to enable/disable the counters affected
+	 * by HPME.
+	 */
+	if ((old ^ __vcpu_sys_reg(vcpu, MDCR_EL2)) & MDCR_EL2_HPME)
+		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
+	return true;
+}
+
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2983,7 +3001,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
 	EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
 	EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
-	EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
+	EL2_REG(MDCR_EL2, access_mdcr, reset_val, 0),
 	EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
 	EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
 	EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
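Both access_mdcr() above and the PMCR_EL0 handler in pmu-emul.c use the same changed-bit idiom: XOR the old and new register values and test the bit of interest, requesting a PMU reload only when that bit actually flipped. A small self-contained demonstration of the idiom (the bit position below is a placeholder, not MDCR_EL2's real layout):

/* Changed-bit idiom: act only when a specific control bit transitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HPME_BIT	(UINT64_C(1) << 7)	/* placeholder position */

static bool needs_pmu_reload(uint64_t old, uint64_t new)
{
	/* XOR yields the bits that differ; the mask picks the tracked one. */
	return (old ^ new) & HPME_BIT;
}

int main(void)
{
	printf("%d\n", needs_pmu_reload(0, HPME_BIT));		/* 1: set     */
	printf("%d\n", needs_pmu_reload(HPME_BIT, HPME_BIT));	/* 0: unchanged */
	printf("%d\n", needs_pmu_reload(HPME_BIT, 0));		/* 1: cleared */
	return 0;
}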

include/kvm/arm_pmu.h (+2, -4)

@@ -53,8 +53,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
 void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
-void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
-void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
 bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
@@ -127,8 +126,7 @@ static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
-static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
-static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
 static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
 static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)

tools/testing/selftests/kvm/aarch64/set_id_regs.c (-1)

@@ -152,7 +152,6 @@ static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
-	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
 	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
 	REG_FTR_END,
 };
