This is a note to let you know that I've just added the patch titled
x86/cpu_entry_area: Add debugstore entries to cpu_entry_area
to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=su...
The filename of the patch is:
     x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
From 10043e02db7f8a4161f76434931051e7d797a5f6 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 4 Dec 2017 15:07:49 +0100
Subject: x86/cpu_entry_area: Add debugstore entries to cpu_entry_area
From: Thomas Gleixner <tglx@linutronix.de>
commit 10043e02db7f8a4161f76434931051e7d797a5f6 upstream.
The Intel PEBS/BTS debug store is a design trainwreck as it expects virtual addresses which must be visible in any execution context.
So it is required to make these mappings visible to user space when kernel page table isolation is active.
Provide enough room for the buffer mappings in the cpu_entry_area so the buffers are available in the user space visible page tables.
At the point where the kernel side entry area is populated there is no buffer available yet, but the kernel PMD must be populated. To achieve this, set the entries for these buffers to non-present.
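For orientation before reading the diff: the pre-population step amounts to walking the reserved buffer area page by page and installing a non-present PTE for each page. A minimal sketch of that loop follows (the real code is in the arch/x86/mm/cpu_entry_area.c hunk below; cea_set_pte(), PAGE_NONE and struct debug_store_buffers come from this patch):

	/*
	 * Pre-populate the kernel PMDs for the not-yet-allocated
	 * buffers: install a non-present PTE (PAGE_NONE) for every
	 * page of the reserved area, so the page tables exist but
	 * no access is possible until the real buffers are mapped.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);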
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/events/intel/ds.c            |    5 ++--
 arch/x86/events/perf_event.h          |   21 +------------------
 arch/x86/include/asm/cpu_entry_area.h |   13 ++++++++++++
 arch/x86/include/asm/intel_ds.h       |   36 ++++++++++++++++++++++++++++++++
 arch/x86/mm/cpu_entry_area.c          |   27 +++++++++++++++++++++++++
 5 files changed, 81 insertions(+), 21 deletions(-)
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -8,11 +8,12 @@
 
 #include "../perf_event.h"
 
+/* Waste a full page so it can be mapped into the cpu_entry_area */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE		24
 
-#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE		PAGE_SIZE
 
 /*
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -14,6 +14,8 @@
 
 #include <linux/perf_event.h>
 
+#include <asm/intel_ds.h>
+
 /* To enable MSR tracing please use the generic trace points. */
 
 /*
@@ -77,8 +79,6 @@ struct amd_nb {
 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		8
 #define PEBS_COUNTER_MASK	((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
@@ -95,23 +95,6 @@ struct amd_nb {
 	PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
 	PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-	u64	bts_buffer_base;
-	u64	bts_index;
-	u64	bts_absolute_maximum;
-	u64	bts_interrupt_threshold;
-	u64	pebs_buffer_base;
-	u64	pebs_index;
-	u64	pebs_absolute_maximum;
-	u64	pebs_interrupt_threshold;
-	u64	pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
 #define PEBS_REGS			\
 	(PERF_REG_X86_AX |		\
 	 PERF_REG_X86_BX |		\
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -5,6 +5,7 @@
 
 #include <linux/percpu-defs.h>
 #include <asm/processor.h>
+#include <asm/intel_ds.h>
 
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
@@ -40,6 +41,18 @@ struct cpu_entry_area {
 	 */
 	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	/*
+	 * Per CPU debug store for Intel performance monitoring. Wastes a
+	 * full page at the moment.
+	 */
+	struct debug_store cpu_debug_store;
+	/*
+	 * The actual PEBS/BTS buffers must be mapped to user space
+	 * Reserve enough fixmap PTEs.
+	 */
+	struct debug_store_buffers cpu_debug_buffers;
+#endif
 };
 
 #define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
--- /dev/null
+++ b/arch/x86/include/asm/intel_ds.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_INTEL_DS_H
+#define _ASM_INTEL_DS_H
+
+#include <linux/percpu-defs.h>
+
+#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS		8
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+	u64	bts_buffer_base;
+	u64	bts_index;
+	u64	bts_absolute_maximum;
+	u64	bts_interrupt_threshold;
+	u64	pebs_buffer_base;
+	u64	pebs_index;
+	u64	pebs_absolute_maximum;
+	u64	pebs_interrupt_threshold;
+	u64	pebs_event_reset[MAX_PEBS_EVENTS];
+} __aligned(PAGE_SIZE);
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
+struct debug_store_buffers {
+	char	bts_buffer[BTS_BUFFER_SIZE];
+	char	pebs_buffer[PEBS_BUFFER_SIZE];
+};
+
+#endif
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -38,6 +38,32 @@ cea_map_percpu_pages(void *cea_vaddr, vo
 		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
+static void percpu_setup_debug_store(int cpu)
+{
+#ifdef CONFIG_CPU_SUP_INTEL
+	int npages;
+	void *cea;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return;
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
+	npages = sizeof(struct debug_store) / PAGE_SIZE;
+	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
+	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
+			     PAGE_KERNEL);
+
+	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
+	/*
+	 * Force the population of PMDs for not yet allocated per cpu
+	 * memory like debug store buffers.
+	 */
+	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
+	for (; npages; npages--, cea += PAGE_SIZE)
+		cea_set_pte(cea, 0, PAGE_NONE);
+#endif
+}
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(int cpu)
 {
@@ -109,6 +135,7 @@ static void __init setup_cpu_entry_area(
 	cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
 		    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
+	percpu_setup_debug_store(cpu);
 }
 
 static __init void setup_cpu_entry_area_ptes(void)
Patches currently in stable-queue which might be from tglx@linutronix.de are
queue-4.14/x86-mm-pti-allow-nx-poison-to-be-set-in-p4d-pgd.patch
queue-4.14/x86-dumpstack-indicate-in-oops-whether-pti-is-configured-and-enabled.patch
queue-4.14/x86-mm-clarify-the-whole-asid-kernel-pcid-user-pcid-naming.patch
queue-4.14/x86-mm-abstract-switching-cr3.patch
queue-4.14/x86-mm-optimize-restore_cr3.patch
queue-4.14/x86-mm-pti-force-entry-through-trampoline-when-pti-active.patch
queue-4.14/x86-entry-align-entry-text-section-to-pmd-boundary.patch
queue-4.14/x86-mm-64-make-a-full-pgd-entry-size-hole-in-the-memory-map.patch
queue-4.14/x86-cpu_entry_area-add-debugstore-entries-to-cpu_entry_area.patch
queue-4.14/x86-mm-pti-share-entry-text-pmd.patch
queue-4.14/x86-ldt-make-the-ldt-mapping-ro.patch
queue-4.14/x86-mm-dump_pagetables-check-user-space-page-table-for-wx-pages.patch
queue-4.14/x86-pti-map-the-vsyscall-page-if-needed.patch
queue-4.14/x86-mm-pti-disable-global-pages-if-page_table_isolation-y.patch
queue-4.14/x86-mm-dump_pagetables-allow-dumping-current-pagetables.patch
queue-4.14/x86-mm-pti-add-infrastructure-for-page-table-isolation.patch
queue-4.14/x86-cpufeatures-add-x86_bug_cpu_insecure.patch
queue-4.14/x86-mm-use-fix-pcid-to-optimize-user-kernel-switches.patch
queue-4.14/x86-mm-use-invpcid-for-__native_flush_tlb_single.patch
queue-4.14/x86-mm-pti-prepare-the-x86-entry-assembly-code-for-entry-exit-cr3-switching.patch
queue-4.14/x86-mm-allow-flushing-for-future-asid-switches.patch
queue-4.14/x86-mm-pti-add-kconfig.patch
queue-4.14/x86-mm-pti-add-mapping-helper-functions.patch
queue-4.14/x86-mm-pti-map-espfix-into-user-space.patch
queue-4.14/x86-mm-pti-share-cpu_entry_area-with-user-space-page-tables.patch
queue-4.14/x86-mm-pti-add-functions-to-clone-kernel-pmds.patch
queue-4.14/x86-pti-add-the-pti-cmdline-option-and-documentation.patch
queue-4.14/x86-events-intel-ds-map-debug-buffers-in-cpu_entry_area.patch
queue-4.14/x86-mm-pti-allocate-a-separate-user-pgd.patch
queue-4.14/x86-pti-put-the-ldt-in-its-own-pgd-if-pti-is-on.patch
queue-4.14/x86-mm-pti-populate-user-pgd.patch
queue-4.14/x86-mm-dump_pagetables-add-page-table-directory-to-the-debugfs-vfs-hierarchy.patch