From 5c646b7e1d8bcb12317426287c516dfa4c5171c2 Mon Sep 17 00:00:00 2001
From: Yanan Wang <wangyanan55@huawei.com>
Date: Wed, 2 Dec 2020 04:10:32 +0800
Subject: [PATCH 1/3] KVM: arm64: Fix memory leak on stage2 update of a valid PTE

When installing a new leaf PTE onto an invalid ptep, we need to
get_page(ptep) to account for the new mapping.

However, simply updating a valid PTE shouldn't result in any additional
refcounting, as there is no new mapping. This otherwise results in a
page being forever wasted.

Address this by fixing up the refcount in stage2_map_walker_try_leaf()
if the PTE was already valid, balancing out the later get_page() in
stage2_map_walk_leaf().

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
[maz: update commit message, add comment in the code]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201201201034.116760-2-wangyanan55@huawei.com
---
 arch/arm64/kvm/hyp/pgtable.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 0271b4a3b9fe18..2beba1dc40ec5c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -470,6 +470,15 @@ static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
 	if (!kvm_block_mapping_supported(addr, end, phys, level))
 		return false;
 
+	/*
+	 * If the PTE was already valid, drop the refcount on the table
+	 * early, as it will be bumped-up again in stage2_map_walk_leaf().
+	 * This ensures that the refcount stays constant across a valid to
+	 * valid PTE update.
+	 */
+	if (kvm_pte_valid(*ptep))
+		put_page(virt_to_page(ptep));
+
 	if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level))
 		goto out;
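Note on the refcount invariant above: the fix relies on the early
put_page() in stage2_map_walker_try_leaf() cancelling out the
unconditional get_page() in stage2_map_walk_leaf(). Below is a toy
user-space model of that pairing (not kernel code; the page refcount
and the PTE are simulated with plain variables):

#include <assert.h>
#include <stdbool.h>

static int page_refcount;	/* stands in for the table page's refcount */
static bool pte_valid;		/* stands in for kvm_pte_valid(*ptep) */

static void get_page(void) { page_refcount++; }
static void put_page(void) { page_refcount--; }

/* Models stage2_map_walker_try_leaf() after the fix. */
static void try_leaf(void)
{
	if (pte_valid)		/* valid -> valid update: drop early... */
		put_page();
	pte_valid = true;	/* install the leaf PTE */
}

/* Models the unconditional bump in stage2_map_walk_leaf(). */
static void walk_leaf(void)
{
	try_leaf();
	get_page();		/* ...so this keeps the count constant */
}

int main(void)
{
	walk_leaf();		/* invalid -> valid: refcount 0 -> 1 */
	assert(page_refcount == 1);
	walk_leaf();		/* valid -> valid: refcount stays at 1 */
	assert(page_refcount == 1);
	return 0;
}

Without the early put_page(), the second walk_leaf() would leave the
refcount at 2, and the table page could never be freed.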
From 3a0b870e3448302ca2ba703bea1b79b61c3f33c6 Mon Sep 17 00:00:00 2001
From: Yanan Wang <wangyanan55@huawei.com>
Date: Wed, 2 Dec 2020 04:10:33 +0800
Subject: [PATCH 2/3] KVM: arm64: Fix handling of merging tables into a block entry

When dirty logging is enabled, we collapse block entries into tables
as necessary. If dirty logging gets canceled, we can end up merging
tables back into block entries.

When this happens, we must not only free the non-huge page-table
pages but also invalidate all the TLB entries that can potentially
cover the block. Otherwise, we end up with multiple possible
translations for the same physical page, which can legitimately result
in a TLB conflict.

To address this, replace the bogus invalidation by IPA with a full VM
invalidation. Although this is pretty heavy-handed, it happens very
infrequently and saves a bunch of invalidations by IPA.

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
[maz: fixup commit message]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201201201034.116760-3-wangyanan55@huawei.com
---
 arch/arm64/kvm/hyp/pgtable.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 2beba1dc40ec5c..bdf8e55ed308e7 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -502,7 +502,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
 		return 0;
 
 	kvm_set_invalid_pte(ptep);
-	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, 0);
+
+	/*
+	 * Invalidate the whole stage-2, as we may have numerous leaf
+	 * entries below us which would otherwise need invalidating
+	 * individually.
+	 */
+	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
 	data->anchor = ptep;
 	return 0;
 }
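Note on the invalidation strategy above: a level-1 block with a 4KiB
granule covers 1GiB, so invalidating the merged range by IPA would
mean one TLBI per leaf entry. A back-of-the-envelope check (plain C,
constants hard-coded for the 4KiB granule case):

#include <stdio.h>

int main(void)
{
	unsigned long block_size = 1UL << 30;	/* 1GiB level-1 block */
	unsigned long page_size  = 1UL << 12;	/* 4KiB leaf entries */

	/* 262144 by-IPA invalidations replaced by one VMID-wide flush */
	printf("%lu\n", block_size / page_size);
	return 0;
}

A single __kvm_tlb_flush_vmid covers all of them at once, at the cost
of also dropping unrelated TLB entries for the VM.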
From 7d894834a305568a0168c55d4729216f5f8cb4e6 Mon Sep 17 00:00:00 2001
From: Yanan Wang <wangyanan55@huawei.com>
Date: Wed, 2 Dec 2020 04:10:34 +0800
Subject: [PATCH 3/3] KVM: arm64: Add usage of stage 2 fault lookup level in user_mem_abort()

If we get a FSC_PERM fault, we currently just use (logging_active &&
writable) to decide whether to call kvm_pgtable_stage2_map(). There
are two more cases we should consider.

(1) After logging_active is configured back to false from true, if we
get a FSC_PERM fault with write_fault and a hugepage adjustment is
needed, we should merge the tables back into a block entry. This case
is currently ignored: kvm_pgtable_stage2_relax_perms() is still
called, which leads to an endless loop and a guest panic due to soft
lockup.

(2) We use (FSC_PERM && logging_active && writable) to decide whether
to collapse a block entry into a table by calling
kvm_pgtable_stage2_map(). But sometimes we may only need to relax
permissions, when trying to write to a page rather than a block. In
that case, using kvm_pgtable_stage2_relax_perms() is fine.

The ISS field bits[1:0] in the ESR_EL2 register indicate the stage 2
lookup level at which a D-abort or I-abort occurred. By comparing the
granule of the fault lookup level with vma_pagesize, we can strictly
distinguish when to call kvm_pgtable_stage2_relax_perms() and when to
call kvm_pgtable_stage2_map(), and both of the above cases are handled
correctly.

Suggested-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201201201034.116760-4-wangyanan55@huawei.com
---
 arch/arm64/include/asm/esr.h         |  1 +
 arch/arm64/include/asm/kvm_emulate.h |  5 +++++
 arch/arm64/kvm/mmu.c                 | 11 +++++++++--
 3 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 22c81f1edda2a1..85a3e49f92f4aa 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -104,6 +104,7 @@
 /* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
 #define ESR_ELx_FSC		(0x3F)
 #define ESR_ELx_FSC_TYPE	(0x3C)
+#define ESR_ELx_FSC_LEVEL	(0x03)
 #define ESR_ELx_FSC_EXTABT	(0x10)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5ef2669ccd6c39..00bc6f1234ba3e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -350,6 +350,11 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vc
 	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
+static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
+{
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
+}
+
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1a01da9fdc99cd..75814a02d1894b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -754,10 +754,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	gfn_t gfn;
 	kvm_pfn_t pfn;
 	bool logging_active = memslot_is_logging(memslot);
-	unsigned long vma_pagesize;
+	unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
+	unsigned long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
 
+	fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);
 	write_fault = kvm_is_write_fault(vcpu);
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
@@ -896,7 +898,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
 		prot |= KVM_PGTABLE_PROT_X;
 
-	if (fault_status == FSC_PERM && !(logging_active && writable)) {
+	/*
+	 * Under the premise of getting a FSC_PERM fault, we just need to relax
+	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
+	 * kvm_pgtable_stage2_map() should be called to change block size.
+	 */
+	if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
	} else {
		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
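Note on fault_granule above: ARM64_HW_PGTABLE_LEVEL_SHIFT() maps the
fault lookup level reported in ESR_EL2 to the size of the mapping at
that level. A standalone sketch of the new decision follows, with the
macro replicated for 4KiB pages (PAGE_SHIFT = 12) and hypothetical
fault values; it is an illustration, not kernel code:

#include <stdio.h>

#define PAGE_SHIFT 12
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) ((PAGE_SHIFT - 3) * (4 - (n)) + 3)

int main(void)
{
	/* Suppose the stage-2 walk faulted at level 3 (a 4KiB leaf)... */
	unsigned long fault_level = 3;
	unsigned long fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level);

	/* ...while the VMA could back a 2MiB block mapping. */
	unsigned long vma_pagesize = 1UL << 21;

	if (vma_pagesize == fault_granule)
		printf("FSC_PERM: relax permissions in place\n");
	else
		printf("FSC_PERM: remap, block size must change\n");
	return 0;
}

With these values the fault granule is 4KiB but vma_pagesize is 2MiB,
so kvm_pgtable_stage2_map() is called to merge the tables back into a
block; when the two sizes match, relaxing permissions is sufficient.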