Commit 4647c82
Merge tag 'kvmarm-fixes-6.14-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.14, take #3

- Fix the TCR_EL2 configuration to not use the ASID in TTBR1_EL2 and to
  not mess up T1SZ/PS by using the HCR_EL2.E2H==0 layout.

- Bring back the VMID allocation to the vcpu_load phase, ensuring that
  we only set up VTTBR_EL2 once on VHE. This cures an ugly race that
  would lead to running with an unallocated VMID.
2 parents 0ad2507 + fa808ed commit 4647c82
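
The second fix is fundamentally an ordering problem, and a minimal userspace model makes it concrete. This is an illustrative sketch, not kernel code: vmid_update(), load_stage2(), vcpu_load_fixed(), the register variables, and the VMID value are all invented stand-ins for kvm_arm_vmid_update(), __load_stage2(), and kvm_arch_vcpu_load(). On VHE, VTTBR_EL2 is programmed eagerly at vcpu_load time, so the VMID has to be valid before then; the sketch shows the fixed ordering.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t vmid;      /* 0 == not yet allocated */
static uint64_t vttbr_el2; /* stand-in for the stage-2 base register */

/* Hypothetical stand-in for kvm_arm_vmid_update(). */
static void vmid_update(void)
{
	if (!vmid)
		vmid = 1; /* pretend the allocator handed out VMID 1 */
}

/* Hypothetical stand-in for __load_stage2(). */
static void load_stage2(void)
{
	vttbr_el2 = vmid << 48; /* the VMID lives in VTTBR_EL2[63:48] */
}

static void vcpu_load_fixed(void)
{
	vmid_update(); /* allocate (or refresh) the VMID first ...   */
	load_stage2(); /* ... then program VTTBR_EL2 exactly once    */
}

int main(void)
{
	vcpu_load_fixed();
	assert(vttbr_el2 >> 48); /* never run with an unallocated VMID */
	printf("VTTBR_EL2.VMID = %llu\n", (unsigned long long)(vttbr_el2 >> 48));
	return 0;
}

Before this merge, the equivalent of vmid_update() ran later, in the vcpu_run loop (see the arm.c hunk below that removes it), leaving a window where a VHE guest could run with an unallocated VMID in VTTBR_EL2.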

File tree: 4 files changed (+22, -30 lines)


arch/arm64/include/asm/kvm_arm.h (+1, -1)

@@ -119,7 +119,7 @@
 #define TCR_EL2_IRGN0_MASK	TCR_IRGN0_MASK
 #define TCR_EL2_T0SZ_MASK	0x3f
 #define TCR_EL2_MASK	(TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
-			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
+			 TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
 
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_DS		TCR_EL2_DS

arch/arm64/include/asm/kvm_host.h (+1, -1)

@@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 extern unsigned int __ro_after_init kvm_arm_vmid_bits;
 int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)

arch/arm64/kvm/arm.c (+17, -20)

@@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	mmu = vcpu->arch.hw_mmu;
 	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
 
+	/*
+	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
+	 * which happens eagerly in VHE.
+	 *
+	 * Also, the VMID allocator only preserves VMIDs that are active at the
+	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
+	 * this is called from kvm_sched_in().
+	 */
+	kvm_arm_vmid_update(&mmu->vmid);
+
 	/*
 	 * We guarantee that both TLBs and I-cache are private to each
 	 * vcpu. If detecting that a vcpu from the same VM has
@@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 */
 	preempt_disable();
 
-	/*
-	 * The VMID allocator only tracks active VMIDs per
-	 * physical CPU, and therefore the VMID allocated may not be
-	 * preserved on VMID roll-over if the task was preempted,
-	 * making a thread's VMID inactive. So we need to call
-	 * kvm_arm_vmid_update() in non-premptible context.
-	 */
-	if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) &&
-	    has_vhe())
-		__load_stage2(vcpu->arch.hw_mmu,
-			      vcpu->arch.hw_mmu->arch);
-
 	kvm_pmu_flush_hwstate(vcpu);
 
 	local_irq_disable();
@@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void)
 static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 {
 	struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-	unsigned long tcr, ips;
+	unsigned long tcr;
 
 	/*
 	 * Calculate the raw per-cpu offset without a translation from the
@@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 	params->mair_el2 = read_sysreg(mair_el1);
 
 	tcr = read_sysreg(tcr_el1);
-	ips = FIELD_GET(TCR_IPS_MASK, tcr);
 	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+		tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK);
 		tcr |= TCR_EPD1_MASK;
 	} else {
+		unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr);
+
 		tcr &= TCR_EL2_MASK;
-		tcr |= TCR_EL2_RES1;
+		tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips);
+		if (lpa2_is_enabled())
+			tcr |= TCR_EL2_DS;
 	}
-	tcr &= ~TCR_T0SZ_MASK;
 	tcr |= TCR_T0SZ(hyp_va_bits);
-	tcr &= ~TCR_EL2_PS_MASK;
-	tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips);
-	if (lpa2_is_enabled())
-		tcr |= TCR_EL2_DS;
 	params->tcr_el2 = tcr;
 
 	params->pgd_pa = kvm_mmu_get_httbr();

arch/arm64/kvm/vmid.c (+3, -8)

@@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void)
 	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
 }
 
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 {
 	unsigned long flags;
 	u64 vmid, old_active_vmid;
-	bool updated = false;
 
 	vmid = atomic64_read(&kvm_vmid->id);
 
@@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
 	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
 					  old_active_vmid, vmid))
-		return false;
+		return;
 
 	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
 
 	/* Check that our VMID belongs to the current generation. */
 	vmid = atomic64_read(&kvm_vmid->id);
-	if (!vmid_gen_match(vmid)) {
+	if (!vmid_gen_match(vmid))
 		vmid = new_vmid(kvm_vmid);
-		updated = true;
-	}
 
 	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
 	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
-
-	return updated;
 }
 
 /*
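
For context on what kvm_arm_vmid_update() is doing above, its lock-free fast path can be modelled with plain C11 atomics. This is an assumed simplification, not the kernel code: VMID_BITS, the single global active_vmid (a per-CPU variable in the kernel), and try_fast_path() are invented for illustration, and the C11 success flag stands in for the kernel's cmpxchg return-value check; only vmid_gen_match() mirrors the kernel's test.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMID_BITS	8	/* assumed width; the kernel probes 8 or 16 */

static _Atomic uint64_t vmid_generation = UINT64_C(1) << VMID_BITS;
static _Atomic uint64_t active_vmid;	/* per-CPU in the kernel */

/* Mirrors the kernel's vmid_gen_match(): the generation bits above the
 * VMID itself must match the current global generation. */
static bool vmid_gen_match(uint64_t vmid)
{
	return !((vmid ^ atomic_load(&vmid_generation)) >> VMID_BITS);
}

/* Lock-free fast path: reuse the VMID if its generation is current and
 * the active slot can be claimed without taking the lock. */
static bool try_fast_path(uint64_t vmid)
{
	uint64_t old = atomic_load(&active_vmid);

	return old != 0 && vmid_gen_match(vmid) &&
	       atomic_compare_exchange_strong(&active_vmid, &old, vmid);
}

int main(void)
{
	uint64_t vmid = atomic_load(&vmid_generation) | 1; /* gen 1, VMID 1 */

	atomic_store(&active_vmid, vmid);
	printf("fast path: %s\n", try_fast_path(vmid) ? "hit" : "miss, take the lock");
	return 0;
}

The slow path, taken when the fast path misses, is what the hunk above shows: grab cpu_vmid_lock, allocate a fresh VMID via new_vmid() if the generation rolled over, and publish it as the active VMID. Since the caller no longer needs to know whether that happened, the function can return void.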
