x86, kfence: enable KFENCE for x86
Add architecture-specific implementation details for KFENCE and enable
KFENCE for the x86 architecture. In particular, this implements the
required interface in <asm/kfence.h> for setting up the pool and
providing helper functions for protecting and unprotecting pages.

For x86, we need to ensure that the pool is mapped with 4K pages rather
than huge pages, since protection is toggled per page; this is done
using the set_memory_4k() helper function.
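
For orientation, a minimal sketch of how the generic KFENCE core
(mm/kfence/core.c, not part of this commit) is expected to call the
kfence_protect_page() hook added below; the wrapper names follow the
upstream series, but the bodies here are an illustrative sketch:

#include <linux/kernel.h>	/* ALIGN_DOWN */
#include <asm/kfence.h>		/* kfence_protect_page() */

/* Make the page non-present so that any access faults. */
static bool kfence_protect(unsigned long addr)
{
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true);
}

/* Re-establish the mapping before handing the object out. */
static bool kfence_unprotect(unsigned long addr)
{
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}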

[[email protected]: add missing copyright and description header]
  Link: https://lkml.kernel.org/r/[email protected]

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Marco Elver <[email protected]>
Signed-off-by: Alexander Potapenko <[email protected]>
Reviewed-by: Dmitry Vyukov <[email protected]>
Co-developed-by: Marco Elver <[email protected]>
Reviewed-by: Jann Horn <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christopher Lameter <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Eric Dumazet <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Joern Engel <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: SeongJae Park <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
ramosian-glider authored and torvalds committed Feb 26, 2021
1 parent 0ce20dd commit 1dc0da6
Showing 3 changed files with 76 additions and 0 deletions.
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -151,6 +151,7 @@ config X86
	select HAVE_ARCH_JUMP_LABEL_RELATIVE
	select HAVE_ARCH_KASAN if X86_64
	select HAVE_ARCH_KASAN_VMALLOC if X86_64
	select HAVE_ARCH_KFENCE
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS if MMU
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
70 changes: 70 additions & 0 deletions arch/x86/include/asm/kfence.h
@@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KFENCE support.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#include <linux/bug.h>
#include <linux/kfence.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

/*
 * The page fault handler entry function, up to which the stack trace is
 * truncated in reports.
 */
#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"

/* Force 4K pages for __kfence_pool. */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		unsigned int level;

		if (!lookup_address(addr, &level))
			return false;

		if (level != PG_LEVEL_4K)
			set_memory_4k(addr, 1);
	}

	return true;
}

/* Protect the given page and flush TLB. */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (WARN_ON(!pte || level != PG_LEVEL_4K))
		return false;

	/*
	 * We need to avoid IPIs, as we may get KFENCE allocations or faults
	 * with interrupts disabled. Therefore, the below is best-effort, and
	 * does not flush TLBs on all CPUs. We can tolerate some inaccuracy;
	 * lazy fault handling takes care of faults after the page is PRESENT.
	 */

	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
	else
		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));

	/* Flush this CPU's TLB. */
	flush_tlb_one_kernel(addr);
	return true;
}

#endif /* _ASM_X86_KFENCE_H */
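
KFENCE_SKIP_ARCH_FAULT_HANDLER above names the frame up to which the
generic report code truncates stack traces. A minimal sketch of such
matching, assuming the entries come from stack_trace_save(); the exact
logic in mm/kfence/report.c differs in detail:

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/kfence.h>

/* Number of leading stack entries to drop from a KFENCE report. */
static int skip_to_fault_handler(const unsigned long *entries, int nr)
{
	char sym[64];
	int i;

	for (i = 0; i < nr; i++) {
		/* %ps resolves a text address to its symbol name. */
		scnprintf(sym, sizeof(sym), "%ps", (void *)entries[i]);
		if (str_has_prefix(sym, KFENCE_SKIP_ARCH_FAULT_HANDLER))
			return i + 1;	/* report starts after this frame */
	}
	return 0;	/* handler not found; keep the full trace */
}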
5 changes: 5 additions & 0 deletions arch/x86/mm/fault.c
@@ -9,6 +9,7 @@
#include <linux/kdebug.h> /* oops_begin/end, ... */
#include <linux/extable.h> /* search_exception_tables */
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/kfence.h> /* kfence_handle_page_fault */
#include <linux/kprobes.h> /* NOKPROBE_SYMBOL, ... */
#include <linux/mmiotrace.h> /* kmmio_handler, ... */
#include <linux/perf_event.h> /* perf_sw_event */
@@ -680,6 +681,10 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
	if (IS_ENABLED(CONFIG_EFI))
		efi_crash_gracefully_on_page_fault(address);

	/* Only not-present faults should be handled by KFENCE. */
	if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
		return;

oops:
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
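The X86_PF_PROT check works because KFENCE guard pages are mapped but
non-present: an access to one always faults with the protection bit
clear, so a protection violation on a present page can never be a
KFENCE event. On a hit, the generic handler reports the error and then
relies on the "lazy fault handling" mentioned in kfence_protect_page()
above. A hedged sketch of that contract; the real handler in
mm/kfence/core.c additionally locates and reports the offending object:

#include <linux/kernel.h>
#include <linux/kfence.h>
#include <asm/kfence.h>

/* Illustrative only: return true iff the fault was consumed by KFENCE. */
static bool kfence_fault_sketch(unsigned long addr)
{
	if (!is_kfence_address((void *)addr))
		return false;	/* not ours: fall through to the oops path */

	/* Error reporting elided. Re-establish the mapping so that the
	 * retried access succeeds instead of faulting forever. */
	return kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false);
}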
