
Commit fae2211

kvaneesh authored and mpe committed
powerpc/mm: Fix crashes with 16G huge pages
To support memory keys, we moved the hash PTE slot information to the second half of the page table. This was fine with PTE entries at level 4 (PTE page) and level 3 (PMD): we already allocate larger page table pages at those levels to accommodate the extra details. At level 4 the extra space was used to track 4K hash page table entry details, and at level 3 it was allocated to track THP details. With hugetlbfs PTEs, we used this extra space at the PMD level to store the slot details. But we also support hugetlbfs PTEs at the PUD level for 16GB pages, and PUD level pages did not allocate extra space. This resulted in memory corruption.

Fix this by allocating extra space at the PUD level when HUGETLB is enabled.

Fixes: bf9a95f ("powerpc: Free up four 64K PTE bits in 64K backed HPTE pages")
Signed-off-by: Aneesh Kumar K.V <[email protected]>
Reviewed-by: Ram Pai <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
1 parent 62e984d commit fae2211
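
The arithmetic the fix relies on can be sketched in plain C. This is an illustration only: PUD_INDEX_SIZE and pud_t below are made-up stand-ins, not the kernel's real configuration; the program just prints the before/after sizes implied by the H_PUD_TABLE_SIZE and H_PUD_CACHE_INDEX definitions in this commit.

/* Illustration only -- models the allocation sizes this patch changes.
 * PUD_INDEX_SIZE and pud_t are hypothetical stand-ins, not kernel values. */
#include <stdio.h>

#define PUD_INDEX_SIZE 4                /* hypothetical index width */
typedef unsigned long pud_t;            /* stand-in for the kernel type */

int main(void)
{
        /* First half: the PUD entries themselves. */
        unsigned long entries = sizeof(pud_t) << PUD_INDEX_SIZE;
        /* Second half: one unsigned long of hash-slot tracking per entry,
         * needed once hugetlb PTEs can live at the PUD level. */
        unsigned long slots = sizeof(unsigned long) << PUD_INDEX_SIZE;

        printf("old PUD table size: %lu bytes\n", entries);
        printf("new PUD table size: %lu bytes\n", entries + slots);
        /* Doubling the allocation equals shifting by one more bit, which is
         * why H_PUD_CACHE_INDEX becomes H_PUD_INDEX_SIZE + 1. */
        printf("via index + 1:      %lu bytes\n",
               (unsigned long)(sizeof(pud_t) << (PUD_INDEX_SIZE + 1)));
        return 0;
}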

11 files changed: 29 additions, 5 deletions


arch/powerpc/include/asm/book3s/32/pgtable.h

Lines changed: 1 addition & 0 deletions
@@ -16,6 +16,7 @@
 #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)

arch/powerpc/include/asm/book3s/64/hash-64k.h

Lines changed: 5 additions & 0 deletions
@@ -146,7 +146,12 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 #else
 #define H_PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE ((sizeof(pud_t) << PUD_INDEX_SIZE) + \
+                          (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE

arch/powerpc/include/asm/book3s/64/hash.h

Lines changed: 10 additions & 0 deletions
@@ -32,6 +32,16 @@
 #else
 #define H_PMD_CACHE_INDEX H_PMD_INDEX_SIZE
 #endif
+/*
+ * We store the slot details in the second half of page table.
+ * Increase the pud level table so that hugetlb ptes can be stored
+ * at pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE)
+#endif
 /*
  * Define the address range of the kernel non-linear virtual area
  */

arch/powerpc/include/asm/book3s/64/pgalloc.h

Lines changed: 3 additions & 3 deletions
@@ -93,13 +93,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+        return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
                                 pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-        kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+        kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +115,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
          * ahead and flush the page walk cache
          */
         flush_tlb_pgtable(tlb, address);
-        pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+        pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
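
The pairing above is what actually prevents the corruption: the PUD page must be allocated from, and freed back to, the cache keyed by PUD_CACHE_INDEX, so the object is large enough to hold the hash-slot second half. A minimal user-space model of that idea follows, with hypothetical sizes and names rather than the kernel's kmem_cache API:

/* Illustration only -- user-space model of the alloc/free pairing above.
 * The sizing rule and constants are assumptions, not kernel code. */
#include <stdlib.h>

#define PUD_INDEX_SIZE  4                       /* hypothetical */
#define PUD_CACHE_INDEX (PUD_INDEX_SIZE + 1)    /* doubled, as in this patch */

static void *pud_alloc_one(void)
{
        /* Size the object by the cache index so the hash-slot second
         * half fits inside the same allocation. */
        return calloc(1, sizeof(unsigned long) << PUD_CACHE_INDEX);
}

static void pud_free(void *pud)
{
        /* In the kernel this must name the same cache index used at
         * allocation time; here plain free() needs no size. */
        free(pud);
}

int main(void)
{
        void *pud = pud_alloc_one();
        pud_free(pud);
        return 0;
}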

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 2 additions & 0 deletions
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table
  * are not always derived out of index size above.

arch/powerpc/include/asm/nohash/32/pgtable.h

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)

arch/powerpc/include/asm/nohash/64/pgtable.h

Lines changed: 1 addition & 0 deletions
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX PUD_INDEX_SIZE
 
 /*
  * Define the address range of the kernel non-linear virtual area

arch/powerpc/mm/hash_utils_64.c

Lines changed: 1 addition & 0 deletions
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
         __pmd_index_size = H_PMD_INDEX_SIZE;
         __pud_index_size = H_PUD_INDEX_SIZE;
         __pgd_index_size = H_PGD_INDEX_SIZE;
+        __pud_cache_index = H_PUD_CACHE_INDEX;
         __pmd_cache_index = H_PMD_CACHE_INDEX;
         __pte_table_size = H_PTE_TABLE_SIZE;
         __pmd_table_size = H_PMD_TABLE_SIZE;

arch/powerpc/mm/init-common.c

Lines changed: 2 additions & 2 deletions
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
          * same size as either the pgd or pmd index except with THP enabled
          * on book3s 64
          */
-        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+        if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+                pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }

arch/powerpc/mm/pgtable-radix.c

Lines changed: 1 addition & 0 deletions
@@ -553,6 +553,7 @@ void __init radix__early_init_mmu(void)
         __pmd_index_size = RADIX_PMD_INDEX_SIZE;
         __pud_index_size = RADIX_PUD_INDEX_SIZE;
         __pgd_index_size = RADIX_PGD_INDEX_SIZE;
+        __pud_cache_index = RADIX_PUD_INDEX_SIZE;
         __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
         __pte_table_size = RADIX_PTE_TABLE_SIZE;
         __pmd_table_size = RADIX_PMD_TABLE_SIZE;
