Before we go into suspend mode, we enable the i.MX UART's interrupt for
the awake bit in the UART Status Register 1. If, for some reason, the
awake bit is already set before we enter suspend mode, we get an
interrupt immediately when we enable interrupts for awake. The UART's
clk_ipg is already disabled at this point. We end up in the interrupt
handler, which usually tries to clear the awake bit. This doesn't work
with the clock disabled. Therefore, we keep getting interrupts forever,
resulting in an endless loop.
Move the calls to serial_imx_enable_wakeup() into the _noirq functions,
where interrupts are disabled and clk_ipg is active. This way, we can
safely clear the awake bit and enable the imx interrupt for awake.
Now that we do the wakeup configuration in .suspend_noirq, we need
separate functions for .suspend_noirq and .freeze_noirq. However,
.resume_noirq and .restore_noirq can still be shared: we just disable
the wakeup source there, which does not conflict with hibernation.
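For illustration, a minimal sketch of the resulting .suspend_noirq path
(the clk_enable() call is assumed from the unquoted context above the
first hunk; error handling trimmed):

/*
 * Sketch only, mirroring the diff below: in the _noirq phase,
 * interrupts are off and clk_ipg is held enabled, so USR1_AWAKE
 * can be cleared and UCR3_AWAKEN set without racing the handler.
 */
static int imx_serial_port_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct imx_port *sport = platform_get_drvdata(pdev);
	int ret;

	ret = clk_enable(sport->clk_ipg);
	if (ret)
		return ret;

	/* enable wakeup from i.MX UART */
	serial_imx_enable_wakeup(sport, true);

	serial_imx_save_context(sport);
	clk_disable(sport->clk_ipg);

	return 0;
}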
Signed-off-by: Martin Kaiser <martin(a)kaiser.cx>
Cc: stable(a)vger.kernel.org
---
drivers/tty/serial/imx.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 250aa26..5df9172 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2221,8 +2221,10 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
unsigned int val;
val = readl(sport->port.membase + UCR3);
- if (on)
+ if (on) {
+ writel(USR1_AWAKE, sport->port.membase + USR1);
val |= UCR3_AWAKEN;
+ }
else
val &= ~UCR3_AWAKEN;
writel(val, sport->port.membase + UCR3);
@@ -2245,6 +2247,9 @@ static int imx_serial_port_suspend_noirq(struct device *dev)
if (ret)
return ret;
+ /* enable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, true);
+
serial_imx_save_context(sport);
clk_disable(sport->clk_ipg);
@@ -2264,6 +2269,9 @@ static int imx_serial_port_resume_noirq(struct device *dev)
serial_imx_restore_context(sport);
+ /* disable wakeup from i.MX UART */
+ serial_imx_enable_wakeup(sport, false);
+
clk_disable(sport->clk_ipg);
return 0;
@@ -2291,9 +2299,6 @@ static int imx_serial_port_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
- /* enable wakeup from i.MX UART */
- serial_imx_enable_wakeup(sport, true);
-
uart_suspend_port(&imx_reg, &sport->port);
disable_irq(sport->port.irq);
@@ -2306,9 +2311,6 @@ static int imx_serial_port_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct imx_port *sport = platform_get_drvdata(pdev);
- /* disable wakeup from i.MX UART */
- serial_imx_enable_wakeup(sport, false);
-
uart_resume_port(&imx_reg, &sport->port);
enable_irq(sport->port.irq);
--
2.1.4
This is a note to let you know that I've just added the patch titled
kaiser: enhanced by kernel and user PCIDs
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kaiser-enhanced-by-kernel-and-user-pcids.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Jan 3 18:58:12 CET 2018
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Wed, 30 Aug 2017 16:23:00 -0700
Subject: kaiser: enhanced by kernel and user PCIDs
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Merged performance improvements to Kaiser, using distinct kernel
and user Process Context Identifiers to minimize TLB flushing.
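As a rough illustration of the CR3 layout this relies on (the helper
name here is ours, for illustration only; the constants are the ones
introduced in the hunks below):

/*
 * Sketch only, not code from the patch: deriving the kernel-mode
 * CR3 image, as the _SWITCH_TO_KERNEL_CR3 macro below does in asm.
 *   bits 11:0 - PCID/ASID: X86_CR3_PCID_ASID_KERN (0x4) or _USER (0x6)
 *   bit  12   - KAISER_SHADOW_PGD_OFFSET, selects the shadow (user) pgd
 *   bit  63   - X86_CR3_PCID_NOFLUSH, "do not flush this PCID on load"
 */
static unsigned long kaiser_kernel_cr3(unsigned long cr3)
{
	/* drop the user pgd bit and whatever ASID was in place */
	cr3 &= ~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET);
	/* select the kernel ASID and avoid flushing its TLB entries */
	return cr3 | X86_CR3_PCID_KERN_NOFLUSH;
}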
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Acked-by: Jiri Kosina <jkosina(a)suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/entry/entry_64.S | 10 ++++-
arch/x86/entry/entry_64_compat.S | 1
arch/x86/include/asm/cpufeature.h | 1
arch/x86/include/asm/kaiser.h | 15 ++++++-
arch/x86/include/asm/pgtable_types.h | 26 +++++++++++++
arch/x86/include/asm/tlbflush.h | 54 +++++++++++++++++++++++-----
arch/x86/include/uapi/asm/processor-flags.h | 3 +
arch/x86/kernel/cpu/common.c | 34 +++++++++++++++++
arch/x86/kvm/x86.c | 3 +
arch/x86/mm/kaiser.c | 7 +++
arch/x86/mm/tlb.c | 46 ++++++++++++++++++++++-
11 files changed, 182 insertions(+), 18 deletions(-)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1291,7 +1291,10 @@ ENTRY(nmi)
/* %rax is saved above, so OK to clobber here */
movq %cr3, %rax
pushq %rax
- andq $(~KAISER_SHADOW_PGD_OFFSET), %rax
+ /* mask off "user" bit of pgd address and 12 PCID bits: */
+ andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
+ /* Add back kernel PCID and "no flush" bit */
+ orq X86_CR3_PCID_KERN_VAR, %rax
movq %rax, %cr3
#endif
call do_nmi
@@ -1532,7 +1535,10 @@ end_repeat_nmi:
/* %rax is saved above, so OK to clobber here */
movq %cr3, %rax
pushq %rax
- andq $(~KAISER_SHADOW_PGD_OFFSET), %rax
+ /* mask off "user" bit of pgd address and 12 PCID bits: */
+ andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), %rax
+ /* Add back kernel PCID and "no flush" bit */
+ orq X86_CR3_PCID_KERN_VAR, %rax
movq %rax, %cr3
#endif
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -13,6 +13,7 @@
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/pgtable_types.h>
#include <asm/kaiser.h>
#include <linux/linkage.h>
#include <linux/err.h>
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -187,6 +187,7 @@
#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
--- a/arch/x86/include/asm/kaiser.h
+++ b/arch/x86/include/asm/kaiser.h
@@ -1,5 +1,8 @@
#ifndef _ASM_X86_KAISER_H
#define _ASM_X86_KAISER_H
+
+#include <uapi/asm/processor-flags.h> /* For PCID constants */
+
/*
* This file includes the definitions for the KAISER feature.
* KAISER is a counter measure against x86_64 side channel attacks on
@@ -21,13 +24,21 @@
.macro _SWITCH_TO_KERNEL_CR3 reg
movq %cr3, \reg
-andq $(~KAISER_SHADOW_PGD_OFFSET), \reg
+andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg
+orq X86_CR3_PCID_KERN_VAR, \reg
movq \reg, %cr3
.endm
.macro _SWITCH_TO_USER_CR3 reg
movq %cr3, \reg
-orq $(KAISER_SHADOW_PGD_OFFSET), \reg
+andq $(~(X86_CR3_PCID_ASID_MASK | KAISER_SHADOW_PGD_OFFSET)), \reg
+/*
+ * This can obviously be one instruction by putting the
+ * KAISER_SHADOW_PGD_OFFSET bit in the X86_CR3_PCID_USER_VAR.
+ * But, just leave it now for simplicity.
+ */
+orq X86_CR3_PCID_USER_VAR, \reg
+orq $(KAISER_SHADOW_PGD_OFFSET), \reg
movq \reg, %cr3
.endm
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -106,6 +106,32 @@
_PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
+/* The ASID is the lower 12 bits of CR3 */
+#define X86_CR3_PCID_ASID_MASK (_AC((1<<12)-1,UL))
+
+/* Mask for all the PCID-related bits in CR3: */
+#define X86_CR3_PCID_MASK (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_MASK)
+#if defined(CONFIG_KAISER) && defined(CONFIG_X86_64)
+#define X86_CR3_PCID_ASID_KERN (_AC(0x4,UL))
+#define X86_CR3_PCID_ASID_USER (_AC(0x6,UL))
+
+#define X86_CR3_PCID_KERN_FLUSH (X86_CR3_PCID_ASID_KERN)
+#define X86_CR3_PCID_USER_FLUSH (X86_CR3_PCID_ASID_USER)
+#define X86_CR3_PCID_KERN_NOFLUSH (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_KERN)
+#define X86_CR3_PCID_USER_NOFLUSH (X86_CR3_PCID_NOFLUSH | X86_CR3_PCID_ASID_USER)
+#else
+#define X86_CR3_PCID_ASID_KERN (_AC(0x0,UL))
+#define X86_CR3_PCID_ASID_USER (_AC(0x0,UL))
+/*
+ * PCIDs are unsupported on 32-bit and none of these bits can be
+ * set in CR3:
+ */
+#define X86_CR3_PCID_KERN_FLUSH (0)
+#define X86_CR3_PCID_USER_FLUSH (0)
+#define X86_CR3_PCID_KERN_NOFLUSH (0)
+#define X86_CR3_PCID_USER_NOFLUSH (0)
+#endif
+
/*
* The cache modes defined here are used to translate between pure SW usage
* and the HW defined cache mode bits and/or PAT entries.
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -12,7 +12,6 @@ static inline void __invpcid(unsigned lo
unsigned long type)
{
struct { u64 d[2]; } desc = { { pcid, addr } };
-
/*
* The memory clobber is because the whole point is to invalidate
* stale TLB entries and, especially if we're flushing global
@@ -133,14 +132,25 @@ static inline void cr4_set_bits_and_upda
static inline void __native_flush_tlb(void)
{
+ if (!cpu_feature_enabled(X86_FEATURE_INVPCID)) {
+ /*
+ * If current->mm == NULL then we borrow a mm which may change during a
+ * task switch and therefore we must not be preempted while we write CR3
+ * back:
+ */
+ preempt_disable();
+ native_write_cr3(native_read_cr3());
+ preempt_enable();
+ return;
+ }
/*
- * If current->mm == NULL then we borrow a mm which may change during a
- * task switch and therefore we must not be preempted while we write CR3
- * back:
- */
- preempt_disable();
- native_write_cr3(native_read_cr3());
- preempt_enable();
+ * We are no longer using globals with KAISER, so a
+ * "nonglobals" flush would work too. But, this is more
+ * conservative.
+ *
+ * Note, this works with CR4.PCIDE=0 or 1.
+ */
+ invpcid_flush_all();
}
static inline void __native_flush_tlb_global_irq_disabled(void)
@@ -162,6 +172,8 @@ static inline void __native_flush_tlb_gl
/*
* Using INVPCID is considerably faster than a pair of writes
* to CR4 sandwiched inside an IRQ flag save/restore.
+ *
+ * Note, this works with CR4.PCIDE=0 or 1.
*/
invpcid_flush_all();
return;
@@ -181,7 +193,31 @@ static inline void __native_flush_tlb_gl
static inline void __native_flush_tlb_single(unsigned long addr)
{
- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+ /*
+ * SIMICS #GP's if you run INVPCID with type 2/3
+ * and X86_CR4_PCIDE clear. Shame!
+ *
+ * The ASIDs used below are hard-coded. But, we must not
+ * call invpcid(type=1/2) before CR4.PCIDE=1. Just call
+	 * invlpg in the case we are called early.
+ */
+ if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) {
+ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
+ return;
+ }
+ /* Flush the address out of both PCIDs. */
+ /*
+ * An optimization here might be to determine addresses
+ * that are only kernel-mapped and only flush the kernel
+ * ASID. But, userspace flushes are probably much more
+ * important performance-wise.
+ *
+ * Make sure to do only a single invpcid when KAISER is
+ * disabled and we have only a single ASID.
+ */
+ if (X86_CR3_PCID_ASID_KERN != X86_CR3_PCID_ASID_USER)
+ invpcid_flush_one(X86_CR3_PCID_ASID_KERN, addr);
+ invpcid_flush_one(X86_CR3_PCID_ASID_USER, addr);
}
static inline void __flush_tlb_all(void)
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -77,7 +77,8 @@
#define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT)
#define X86_CR3_PCD_BIT 4 /* Page Cache Disable */
#define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT)
-#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */
+#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */
+#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT)
/*
* Intel CPU features in CR4
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -321,11 +321,45 @@ static __always_inline void setup_smap(s
}
}
+/*
+ * These can have bit 63 set, so we can not just use a plain "or"
+ * instruction to get their value or'd into CR3. It would take
+ * another register. So, we use a memory reference to these
+ * instead.
+ *
+ * This is also handy because systems that do not support
+ * PCIDs just end up or'ing a 0 into their CR3, which does
+ * no harm.
+ */
+__aligned(PAGE_SIZE) unsigned long X86_CR3_PCID_KERN_VAR = 0;
+__aligned(PAGE_SIZE) unsigned long X86_CR3_PCID_USER_VAR = 0;
+
static void setup_pcid(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_PCID)) {
if (cpu_has(c, X86_FEATURE_PGE)) {
cr4_set_bits(X86_CR4_PCIDE);
+ /*
+ * These variables are used by the entry/exit
+ * code to change PCIDs.
+ */
+#ifdef CONFIG_KAISER
+ X86_CR3_PCID_KERN_VAR = X86_CR3_PCID_KERN_NOFLUSH;
+ X86_CR3_PCID_USER_VAR = X86_CR3_PCID_USER_NOFLUSH;
+#endif
+ /*
+ * INVPCID has two "groups" of types:
+ * 1/2: Invalidate an individual address
+ * 3/4: Invalidate all contexts
+ *
+ * 1/2 take a PCID, but 3/4 do not. So, 3/4
+ * ignore the PCID argument in the descriptor.
+ * But, we have to be careful not to call 1/2
+ * with an actual non-zero PCID in them before
+ * we do the above cr4_set_bits().
+ */
+ if (cpu_has(c, X86_FEATURE_INVPCID))
+ set_cpu_cap(c, X86_FEATURE_INVPCID_SINGLE);
} else {
/*
* flush_tlb_all(), as currently implemented, won't
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -759,7 +759,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, u
return 1;
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
- if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
+ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_ASID_MASK) ||
+ !is_long_mode(vcpu))
return 1;
}
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -240,6 +240,8 @@ static void __init kaiser_init_all_pgds(
} while (0)
extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
+extern unsigned long X86_CR3_PCID_KERN_VAR;
+extern unsigned long X86_CR3_PCID_USER_VAR;
/*
* If anything in here fails, we will likely die on one of the
* first kernel->user transitions and init will die. But, we
@@ -290,6 +292,11 @@ void __init kaiser_init(void)
kaiser_add_user_map_early(&debug_idt_table,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL);
+
+ kaiser_add_user_map_early(&X86_CR3_PCID_KERN_VAR, PAGE_SIZE,
+ __PAGE_KERNEL);
+ kaiser_add_user_map_early(&X86_CR3_PCID_USER_VAR, PAGE_SIZE,
+ __PAGE_KERNEL);
}
/* Add a mapping to the shadow mapping, and synchronize the mappings */
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -34,6 +34,46 @@ struct flush_tlb_info {
unsigned long flush_end;
};
+static void load_new_mm_cr3(pgd_t *pgdir)
+{
+ unsigned long new_mm_cr3 = __pa(pgdir);
+
+ /*
+	 * KAISER plus PCIDs needs some extra work here. But,
+	 * if either feature is not present, we need no
+	 * PCIDs here and just do a normal, full TLB flush with
+	 * the write_cr3().
+ */
+ if (!IS_ENABLED(CONFIG_KAISER) ||
+ !cpu_feature_enabled(X86_FEATURE_PCID))
+ goto out_set_cr3;
+ /*
+ * We reuse the same PCID for different tasks, so we must
+	 * flush all the entries for the PCID out when we change
+ * tasks.
+ */
+ new_mm_cr3 = X86_CR3_PCID_KERN_FLUSH | __pa(pgdir);
+
+ /*
+ * The flush from load_cr3() may leave old TLB entries
+ * for userspace in place. We must flush that context
+ * separately. We can theoretically delay doing this
+ * until we actually load up the userspace CR3, but
+ * that's a bit tricky. We have to have the "need to
+ * flush userspace PCID" bit per-cpu and check it in the
+ * exit-to-userspace paths.
+ */
+ invpcid_flush_single_context(X86_CR3_PCID_ASID_USER);
+
+out_set_cr3:
+ /*
+ * Caution: many callers of this function expect
+ * that load_cr3() is serializing and orders TLB
+ * fills with respect to the mm_cpumask writes.
+ */
+ write_cr3(new_mm_cr3);
+}
+
/*
* We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask.
@@ -45,7 +85,7 @@ void leave_mm(int cpu)
BUG();
if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
- load_cr3(swapper_pg_dir);
+ load_new_mm_cr3(swapper_pg_dir);
/*
* This gets called in the idle path where RCU
* functions differently. Tracing normally
@@ -105,7 +145,7 @@ void switch_mm_irqs_off(struct mm_struct
* ordering guarantee we need.
*
*/
- load_cr3(next->pgd);
+ load_new_mm_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
@@ -152,7 +192,7 @@ void switch_mm_irqs_off(struct mm_struct
* As above, load_cr3() is serializing and orders TLB
* fills with respect to the mm_cpumask write.
*/
- load_cr3(next->pgd);
+ load_new_mm_cr3(next->pgd);
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
load_mm_cr4(next);
load_mm_ldt(next);
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.4/x86-paravirt-dont-patch-flush_tlb_single.patch
queue-4.4/kaiser-merged-update.patch
queue-4.4/kaiser-enhanced-by-kernel-and-user-pcids.patch
This is a note to let you know that I've just added the patch titled
kaiser: merged update
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kaiser-merged-update.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Wed Jan 3 20:37:21 CET 2018
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Wed, 30 Aug 2017 16:23:00 -0700
Subject: kaiser: merged update
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Merged fixes and cleanups, rebased to 4.9.51 tree (no 5-level paging).
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/entry/entry_64.S | 105 ++++++++++-
arch/x86/include/asm/kaiser.h | 43 ++--
arch/x86/include/asm/pgtable.h | 18 +
arch/x86/include/asm/pgtable_64.h | 48 ++++-
arch/x86/include/asm/pgtable_types.h | 6
arch/x86/kernel/espfix_64.c | 13 -
arch/x86/kernel/head_64.S | 19 +-
arch/x86/kernel/ldt.c | 27 ++
arch/x86/kernel/tracepoint.c | 2
arch/x86/mm/kaiser.c | 317 +++++++++++++++++++++++++----------
arch/x86/mm/pageattr.c | 63 +++++-
arch/x86/mm/pgtable.c | 40 +---
include/linux/kaiser.h | 26 ++
kernel/fork.c | 9
security/Kconfig | 5
15 files changed, 551 insertions(+), 190 deletions(-)
create mode 100644 include/linux/kaiser.h
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -230,6 +230,13 @@ entry_SYSCALL_64_fastpath:
movq RIP(%rsp), %rcx
movq EFLAGS(%rsp), %r11
RESTORE_C_REGS_EXCEPT_RCX_R11
+ /*
+ * This opens a window where we have a user CR3, but are
+ * running in the kernel. This makes using the CS
+ * register useless for telling whether or not we need to
+ * switch CR3 in NMIs. Normal interrupts are OK because
+ * they are off here.
+ */
SWITCH_USER_CR3
movq RSP(%rsp), %rsp
USERGS_SYSRET64
@@ -326,11 +333,25 @@ return_from_SYSCALL_64:
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11
+ /*
+ * This opens a window where we have a user CR3, but are
+ * running in the kernel. This makes using the CS
+ * register useless for telling whether or not we need to
+ * switch CR3 in NMIs. Normal interrupts are OK because
+ * they are off here.
+ */
SWITCH_USER_CR3
movq RSP(%rsp), %rsp
USERGS_SYSRET64
opportunistic_sysret_failed:
+ /*
+ * This opens a window where we have a user CR3, but are
+ * running in the kernel. This makes using the CS
+ * register useless for telling whether or not we need to
+ * switch CR3 in NMIs. Normal interrupts are OK because
+ * they are off here.
+ */
SWITCH_USER_CR3
SWAPGS
jmp restore_c_regs_and_iret
@@ -1087,6 +1108,13 @@ ENTRY(error_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
+ /*
+ * error_entry() always returns with a kernel gsbase and
+ * CR3. We must also have a kernel CR3/gsbase before
+ * calling TRACE_IRQS_*. Just unconditionally switch to
+ * the kernel CR3 here.
+ */
+ SWITCH_KERNEL_CR3
xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1096,7 +1124,6 @@ ENTRY(error_entry)
* from user mode due to an IRET fault.
*/
SWAPGS
- SWITCH_KERNEL_CR3
.Lerror_entry_from_usermode_after_swapgs:
/*
@@ -1148,7 +1175,6 @@ ENTRY(error_entry)
* Switch to kernel gsbase:
*/
SWAPGS
- SWITCH_KERNEL_CR3
/*
* Pretend that the exception came from user mode: set up pt_regs
@@ -1249,7 +1275,10 @@ ENTRY(nmi)
*/
SWAPGS_UNSAFE_STACK
- SWITCH_KERNEL_CR3_NO_STACK
+ /*
+ * percpu variables are mapped with user CR3, so no need
+ * to switch CR3 here.
+ */
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -1283,14 +1312,33 @@ ENTRY(nmi)
movq %rsp, %rdi
movq $-1, %rsi
+#ifdef CONFIG_KAISER
+ /* Unconditionally use kernel CR3 for do_nmi() */
+ /* %rax is saved above, so OK to clobber here */
+ movq %cr3, %rax
+ pushq %rax
+#ifdef CONFIG_KAISER_REAL_SWITCH
+ andq $(~0x1000), %rax
+#endif
+ movq %rax, %cr3
+#endif
call do_nmi
+ /*
+ * Unconditionally restore CR3. I know we return to
+ * kernel code that needs user CR3, but do we ever return
+ * to "user mode" where we need the kernel CR3?
+ */
+#ifdef CONFIG_KAISER
+ popq %rax
+ mov %rax, %cr3
+#endif
/*
* Return back to user mode. We must *not* do the normal exit
- * work, because we don't want to enable interrupts. Fortunately,
- * do_nmi doesn't modify pt_regs.
+ * work, because we don't want to enable interrupts. Do not
+ * switch to user CR3: we might be going back to kernel code
+ * that had a user CR3 set.
*/
- SWITCH_USER_CR3
SWAPGS
jmp restore_c_regs_and_iret
@@ -1486,23 +1534,54 @@ end_repeat_nmi:
ALLOC_PT_GPREGS_ON_STACK
/*
- * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
- * as we should not be calling schedule in NMI context.
- * Even with normal interrupts enabled. An NMI should not be
- * setting NEED_RESCHED or anything that normal interrupts and
- * exceptions might do.
+ * Use the same approach as paranoid_entry to handle SWAPGS, but
+ * without CR3 handling since we do that differently in NMIs. No
+ * need to use paranoid_exit as we should not be calling schedule
+ * in NMI context. Even with normal interrupts enabled. An NMI
+ * should not be setting NEED_RESCHED or anything that normal
+ * interrupts and exceptions might do.
*/
- call paranoid_entry
+ cld
+ SAVE_C_REGS
+ SAVE_EXTRA_REGS
+ movl $1, %ebx
+ movl $MSR_GS_BASE, %ecx
+ rdmsr
+ testl %edx, %edx
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx, %ebx
+1:
+#ifdef CONFIG_KAISER
+ /* Unconditionally use kernel CR3 for do_nmi() */
+ /* %rax is saved above, so OK to clobber here */
+ movq %cr3, %rax
+ pushq %rax
+#ifdef CONFIG_KAISER_REAL_SWITCH
+ andq $(~0x1000), %rax
+#endif
+ movq %rax, %cr3
+#endif
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
+ addq $8, %rdi /* point %rdi at ptregs, fixed up for CR3 */
movq $-1, %rsi
call do_nmi
+ /*
+ * Unconditionally restore CR3. We might be returning to
+	 * kernel code that needs user CR3, like just before
+ * a sysret.
+ */
+#ifdef CONFIG_KAISER
+ popq %rax
+ mov %rax, %cr3
+#endif
testl %ebx, %ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
- SWITCH_USER_CR3_NO_STACK
+ /* We fixed up CR3 above, so no need to switch it here */
SWAPGS_UNSAFE_STACK
nmi_restore:
RESTORE_EXTRA_REGS
--- a/arch/x86/include/asm/kaiser.h
+++ b/arch/x86/include/asm/kaiser.h
@@ -16,13 +16,17 @@
.macro _SWITCH_TO_KERNEL_CR3 reg
movq %cr3, \reg
+#ifdef CONFIG_KAISER_REAL_SWITCH
andq $(~0x1000), \reg
+#endif
movq \reg, %cr3
.endm
.macro _SWITCH_TO_USER_CR3 reg
movq %cr3, \reg
+#ifdef CONFIG_KAISER_REAL_SWITCH
orq $(0x1000), \reg
+#endif
movq \reg, %cr3
.endm
@@ -65,48 +69,53 @@ movq PER_CPU_VAR(unsafe_stack_register_b
.endm
#endif /* CONFIG_KAISER */
+
#else /* __ASSEMBLY__ */
#ifdef CONFIG_KAISER
-// Upon kernel/user mode switch, it may happen that
-// the address space has to be switched before the registers have been stored.
-// To change the address space, another register is needed.
-// A register therefore has to be stored/restored.
-//
-DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
+/*
+ * Upon kernel/user mode switch, it may happen that the address
+ * space has to be switched before the registers have been
+ * stored. To change the address space, another register is
+ * needed. A register therefore has to be stored/restored.
+*/
-#endif /* CONFIG_KAISER */
+DECLARE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
/**
- * shadowmem_add_mapping - map a virtual memory part to the shadow mapping
+ * kaiser_add_mapping - map a virtual memory part to the shadow (user) mapping
* @addr: the start address of the range
* @size: the size of the range
* @flags: The mapping flags of the pages
*
- * the mapping is done on a global scope, so no bigger synchronization has to be done.
- * the pages have to be manually unmapped again when they are not needed any longer.
+ * The mapping is done on a global scope, so no bigger
+ * synchronization has to be done. the pages have to be
+ * manually unmapped again when they are not needed any longer.
*/
-extern void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags);
+extern int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags);
/**
- * shadowmem_remove_mapping - unmap a virtual memory part of the shadow mapping
+ * kaiser_remove_mapping - unmap a virtual memory part of the shadow mapping
* @addr: the start address of the range
* @size: the size of the range
*/
extern void kaiser_remove_mapping(unsigned long start, unsigned long size);
/**
- * shadowmem_initialize_mapping - Initalize the shadow mapping
+ * kaiser_initialize_mapping - Initialize the shadow mapping
*
- * most parts of the shadow mapping can be mapped upon boot time.
- * only the thread stacks have to be mapped on runtime.
- * the mapped regions are not unmapped at all.
+ * Most parts of the shadow mapping can be mapped upon boot
+ * time. Only per-process things like the thread stacks
+ * or a new LDT have to be mapped at runtime. These boot-
+ * time mappings are permanent and never unmapped.
*/
extern void kaiser_init(void);
-#endif
+#endif /* CONFIG_KAISER */
+
+#endif /* __ASSEMBLY */
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -690,7 +690,17 @@ static inline pud_t *pud_offset(pgd_t *p
static inline int pgd_bad(pgd_t pgd)
{
- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+ pgdval_t ignore_flags = _PAGE_USER;
+ /*
+ * We set NX on KAISER pgds that map userspace memory so
+ * that userspace can not meaningfully use the kernel
+ * page table by accident; it will fault on the first
+ * instruction it tries to run. See native_set_pgd().
+ */
+ if (IS_ENABLED(CONFIG_KAISER))
+ ignore_flags |= _PAGE_NX;
+
+ return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
@@ -905,8 +915,10 @@ static inline void clone_pgd_range(pgd_t
{
memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_KAISER
- // clone the shadow pgd part as well
- memcpy(native_get_shadow_pgd(dst), native_get_shadow_pgd(src), count * sizeof(pgd_t));
+ /* Clone the shadow pgd part as well */
+ memcpy(native_get_shadow_pgd(dst),
+ native_get_shadow_pgd(src),
+ count * sizeof(pgd_t));
#endif
}
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -107,26 +107,58 @@ static inline void native_pud_clear(pud_
}
#ifdef CONFIG_KAISER
-static inline pgd_t * native_get_shadow_pgd(pgd_t *pgdp) {
+static inline pgd_t * native_get_shadow_pgd(pgd_t *pgdp)
+{
return (pgd_t *)(void*)((unsigned long)(void*)pgdp | (unsigned long)PAGE_SIZE);
}
-static inline pgd_t * native_get_normal_pgd(pgd_t *pgdp) {
+static inline pgd_t * native_get_normal_pgd(pgd_t *pgdp)
+{
return (pgd_t *)(void*)((unsigned long)(void*)pgdp & ~(unsigned long)PAGE_SIZE);
}
+#else
+static inline pgd_t * native_get_shadow_pgd(pgd_t *pgdp)
+{
+ BUILD_BUG_ON(1);
+ return NULL;
+}
+static inline pgd_t * native_get_normal_pgd(pgd_t *pgdp)
+{
+ return pgdp;
+}
#endif /* CONFIG_KAISER */
+/*
+ * Page table pages are page-aligned. The lower half of the top
+ * level is used for userspace and the top half for the kernel.
+ * This returns true for user pages that need to get copied into
+ * both the user and kernel copies of the page tables, and false
+ * for kernel pages that should only be in the kernel copy.
+ */
+static inline bool is_userspace_pgd(void *__ptr)
+{
+ unsigned long ptr = (unsigned long)__ptr;
+
+ return ((ptr % PAGE_SIZE) < (PAGE_SIZE / 2));
+}
+
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
#ifdef CONFIG_KAISER
- // We know that a pgd is page aligned.
- // Therefore the lower indices have to be mapped to user space.
- // These pages are mapped to the shadow mapping.
- if ((((unsigned long)pgdp) % PAGE_SIZE) < (PAGE_SIZE / 2)) {
+ pteval_t extra_kern_pgd_flags = 0;
+ /* Do we need to also populate the shadow pgd? */
+ if (is_userspace_pgd(pgdp)) {
native_get_shadow_pgd(pgdp)->pgd = pgd.pgd;
+ /*
+ * Even if the entry is *mapping* userspace, ensure
+ * that userspace can not use it. This way, if we
+ * get out to userspace running on the kernel CR3,
+ * userspace will crash instead of running.
+ */
+ extra_kern_pgd_flags = _PAGE_NX;
}
-
- pgdp->pgd = pgd.pgd & ~_PAGE_USER;
+ pgdp->pgd = pgd.pgd;
+ pgdp->pgd |= extra_kern_pgd_flags;
#else /* CONFIG_KAISER */
*pgdp = pgd;
#endif
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -48,7 +48,7 @@
#ifdef CONFIG_KAISER
#define _PAGE_GLOBAL (_AT(pteval_t, 0))
#else
-#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#endif
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
@@ -123,11 +123,7 @@
#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#endif
-#ifdef CONFIG_KAISER
-#define _PAGE_PROTNONE (_AT(pteval_t, 0))
-#else
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-#endif
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
_PAGE_ACCESSED | _PAGE_DIRTY)
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -127,11 +127,14 @@ void __init init_espfix_bsp(void)
/* Install the espfix pud into the kernel page directory */
pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
-#ifdef CONFIG_KAISER
- // add the esp stack pud to the shadow mapping here.
- // This can be done directly, because the fixup stack has its own pud
- set_pgd(native_get_shadow_pgd(pgd_p), __pgd(_PAGE_TABLE | __pa((pud_t *)espfix_pud_page)));
-#endif
+ /*
+ * Just copy the top-level PGD that is mapping the espfix
+ * area to ensure it is mapped into the shadow user page
+ * tables.
+ */
+ if (IS_ENABLED(CONFIG_KAISER))
+ set_pgd(native_get_shadow_pgd(pgd_p),
+ __pgd(_KERNPG_TABLE | __pa((pud_t *)espfix_pud_page)));
/* Randomize the locations */
init_espfix_random();
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -406,11 +406,24 @@ GLOBAL(early_recursion_flag)
GLOBAL(name)
#ifdef CONFIG_KAISER
+/*
+ * Each PGD needs to be 8k long and 8k aligned. We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define KAISER_USER_PGD_FILL 512
+/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define KAISER_USER_PGD_FILL 0
#endif
/* Automate the creation of 1 to 1 mapping pmd entries */
@@ -425,6 +438,7 @@ GLOBAL(name)
NEXT_PGD_PAGE(early_level4_pgt)
.fill 511,8,0
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .fill KAISER_USER_PGD_FILL,8,0
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -433,7 +447,8 @@ NEXT_PAGE(early_dynamic_pgts)
#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_level4_pgt)
- .fill 2*512,8,0
+ .fill 512,8,0
+ .fill KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_level4_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
@@ -442,6 +457,7 @@ NEXT_PGD_PAGE(init_level4_pgt)
.org init_level4_pgt + L4_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+ .fill KAISER_USER_PGD_FILL,8,0
NEXT_PAGE(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
@@ -452,6 +468,7 @@ NEXT_PAGE(level2_ident_pgt)
*/
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
+ .fill KAISER_USER_PGD_FILL,8,0
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <asm/ldt.h>
+#include <asm/kaiser.h>
#include <asm/desc.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
@@ -34,11 +35,21 @@ static void flush_ldt(void *current_mm)
set_ldt(pc->ldt->entries, pc->ldt->size);
}
+static void __free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ free_page((unsigned long)ldt->entries);
+ kfree(ldt);
+}
+
/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
static struct ldt_struct *alloc_ldt_struct(int size)
{
struct ldt_struct *new_ldt;
int alloc_size;
+ int ret = 0;
if (size > LDT_ENTRIES)
return NULL;
@@ -66,6 +77,14 @@ static struct ldt_struct *alloc_ldt_stru
return NULL;
}
+ // FIXME: make kaiser_add_mapping() return an error code
+ // when it fails
+	ret = kaiser_add_mapping((unsigned long)new_ldt->entries, alloc_size,
+ __PAGE_KERNEL);
+ if (ret) {
+ __free_ldt_struct(new_ldt);
+ return NULL;
+ }
new_ldt->size = size;
return new_ldt;
}
@@ -92,12 +111,10 @@ static void free_ldt_struct(struct ldt_s
if (likely(!ldt))
return;
+ kaiser_remove_mapping((unsigned long)ldt->entries,
+ ldt->size * LDT_ENTRY_SIZE);
paravirt_free_ldt(ldt->entries, ldt->size);
- if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(ldt->entries);
- else
- free_page((unsigned long)ldt->entries);
- kfree(ldt);
+ __free_ldt_struct(ldt);
}
/*
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -9,10 +9,12 @@
#include <linux/atomic.h>
atomic_t trace_idt_ctr = ATOMIC_INIT(0);
+__aligned(PAGE_SIZE)
struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
(unsigned long) trace_idt_table };
/* No need to be aligned, but done to keep all IDTs defined the same way. */
+__aligned(PAGE_SIZE)
gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
static int trace_irq_vector_refcount;
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -1,160 +1,305 @@
-
-
+#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-
#include <linux/uaccess.h>
+
+#include <asm/kaiser.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#ifdef CONFIG_KAISER
__visible DEFINE_PER_CPU_USER_MAPPED(unsigned long, unsafe_stack_register_backup);
+/*
+ * At runtime, the only things we map are some things for CPU
+ * hotplug, and stacks for new processes. No two CPUs will ever
+ * be populating the same addresses, so we only need to ensure
+ * that we protect between two CPUs trying to allocate and
+ * populate the same page table page.
+ *
+ * Only take this lock when doing a set_p[4um]d(), but it is not
+ * needed for doing a set_pte(). We assume that only the *owner*
+ * of a given allocation will be doing this for _their_
+ * allocation.
+ *
+ * This ensures that once a system has been running for a while
+ * and there have been stacks all over and these page tables
+ * are fully populated, there will be no further acquisitions of
+ * this lock.
+ */
+static DEFINE_SPINLOCK(shadow_table_allocation_lock);
-/**
- * Get the real ppn from a address in kernel mapping.
- * @param address The virtual adrress
- * @return the physical address
+/*
+ * Returns -1 on error.
*/
-static inline unsigned long get_pa_from_mapping (unsigned long address)
+static inline unsigned long get_pa_from_mapping(unsigned long vaddr)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- pgd = pgd_offset_k(address);
- BUG_ON(pgd_none(*pgd) || pgd_large(*pgd));
+ pgd = pgd_offset_k(vaddr);
+ /*
+ * We made all the kernel PGDs present in kaiser_init().
+ * We expect them to stay that way.
+ */
+ BUG_ON(pgd_none(*pgd));
+ /*
+ * PGDs are either 512GB or 128TB on all x86_64
+ * configurations. We don't handle these.
+ */
+ BUG_ON(pgd_large(*pgd));
+
+ pud = pud_offset(pgd, vaddr);
+ if (pud_none(*pud)) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
- pud = pud_offset(pgd, address);
- BUG_ON(pud_none(*pud));
+ if (pud_large(*pud))
+ return (pud_pfn(*pud) << PAGE_SHIFT) | (vaddr & ~PUD_PAGE_MASK);
- if (pud_large(*pud)) {
- return (pud_pfn(*pud) << PAGE_SHIFT) | (address & ~PUD_PAGE_MASK);
+ pmd = pmd_offset(pud, vaddr);
+ if (pmd_none(*pmd)) {
+ WARN_ON_ONCE(1);
+ return -1;
}
- pmd = pmd_offset(pud, address);
- BUG_ON(pmd_none(*pmd));
+ if (pmd_large(*pmd))
+ return (pmd_pfn(*pmd) << PAGE_SHIFT) | (vaddr & ~PMD_PAGE_MASK);
- if (pmd_large(*pmd)) {
- return (pmd_pfn(*pmd) << PAGE_SHIFT) | (address & ~PMD_PAGE_MASK);
+ pte = pte_offset_kernel(pmd, vaddr);
+ if (pte_none(*pte)) {
+ WARN_ON_ONCE(1);
+ return -1;
}
- pte = pte_offset_kernel(pmd, address);
- BUG_ON(pte_none(*pte));
-
- return (pte_pfn(*pte) << PAGE_SHIFT) | (address & ~PAGE_MASK);
+ return (pte_pfn(*pte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
-void _kaiser_copy (unsigned long start_addr, unsigned long size,
- unsigned long flags)
+/*
+ * This is a relatively normal page table walk, except that it
+ * also tries to allocate page tables pages along the way.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static pte_t *kaiser_pagetable_walk(unsigned long address, bool is_atomic)
{
- pgd_t *pgd;
- pud_t *pud;
pmd_t *pmd;
- pte_t *pte;
- unsigned long address;
- unsigned long end_addr = start_addr + size;
- unsigned long target_address;
+ pud_t *pud;
+ pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
+ gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- for (address = PAGE_ALIGN(start_addr - (PAGE_SIZE - 1));
- address < PAGE_ALIGN(end_addr); address += PAGE_SIZE) {
- target_address = get_pa_from_mapping(address);
+ might_sleep();
+ if (is_atomic) {
+ gfp &= ~GFP_KERNEL;
+ gfp |= __GFP_HIGH | __GFP_ATOMIC;
+ }
- pgd = native_get_shadow_pgd(pgd_offset_k(address));
+ if (pgd_none(*pgd)) {
+ WARN_ONCE(1, "All shadow pgds should have been populated");
+ return NULL;
+ }
+ BUILD_BUG_ON(pgd_large(*pgd) != 0);
- BUG_ON(pgd_none(*pgd) && "All shadow pgds should be mapped at this time\n");
- BUG_ON(pgd_large(*pgd));
+ pud = pud_offset(pgd, address);
+ /* The shadow page tables do not use large mappings: */
+ if (pud_large(*pud)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ if (pud_none(*pud)) {
+ unsigned long new_pmd_page = __get_free_page(gfp);
+ if (!new_pmd_page)
+ return NULL;
+ spin_lock(&shadow_table_allocation_lock);
+ if (pud_none(*pud))
+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+ else
+ free_page(new_pmd_page);
+ spin_unlock(&shadow_table_allocation_lock);
+ }
- pud = pud_offset(pgd, address);
- if (pud_none(*pud)) {
- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd_alloc_one(0, address))));
- }
- BUG_ON(pud_large(*pud));
+ pmd = pmd_offset(pud, address);
+ /* The shadow page tables do not use large mappings: */
+ if (pmd_large(*pmd)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ if (pmd_none(*pmd)) {
+ unsigned long new_pte_page = __get_free_page(gfp);
+ if (!new_pte_page)
+ return NULL;
+ spin_lock(&shadow_table_allocation_lock);
+ if (pmd_none(*pmd))
+ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+ else
+ free_page(new_pte_page);
+ spin_unlock(&shadow_table_allocation_lock);
+ }
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd)) {
- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte_alloc_one_kernel(0, address))));
- }
- BUG_ON(pmd_large(*pmd));
+ return pte_offset_kernel(pmd, address);
+}
- pte = pte_offset_kernel(pmd, address);
+int kaiser_add_user_map(const void *__start_addr, unsigned long size,
+ unsigned long flags)
+{
+ int ret = 0;
+ pte_t *pte;
+ unsigned long start_addr = (unsigned long )__start_addr;
+ unsigned long address = start_addr & PAGE_MASK;
+ unsigned long end_addr = PAGE_ALIGN(start_addr + size);
+ unsigned long target_address;
+
+ for (;address < end_addr; address += PAGE_SIZE) {
+ target_address = get_pa_from_mapping(address);
+ if (target_address == -1) {
+ ret = -EIO;
+ break;
+ }
+ pte = kaiser_pagetable_walk(address, false);
if (pte_none(*pte)) {
set_pte(pte, __pte(flags | target_address));
} else {
- BUG_ON(__pa(pte_page(*pte)) != target_address);
+ pte_t tmp;
+ set_pte(&tmp, __pte(flags | target_address));
+ WARN_ON_ONCE(!pte_same(*pte, tmp));
}
}
+ return ret;
}
-// at first, add a pmd for every pgd entry in the shadowmem-kernel-part of the kernel mapping
-static inline void __init _kaiser_init(void)
+static int kaiser_add_user_map_ptrs(const void *start, const void *end, unsigned long flags)
+{
+ unsigned long size = end - start;
+
+ return kaiser_add_user_map(start, size, flags);
+}
+
+/*
+ * Ensure that the top level of the (shadow) page tables are
+ * entirely populated. This ensures that all processes that get
+ * forked have the same entries. This way, we do not have to
+ * ever go set up new entries in older processes.
+ *
+ * Note: we never free these, so there are no updates to them
+ * after this.
+ */
+static void __init kaiser_init_all_pgds(void)
{
pgd_t *pgd;
int i = 0;
pgd = native_get_shadow_pgd(pgd_offset_k((unsigned long )0));
for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
- set_pgd(pgd + i, __pgd(_PAGE_TABLE |__pa(pud_alloc_one(0, 0))));
+ pgd_t new_pgd;
+ pud_t *pud = pud_alloc_one(&init_mm, PAGE_OFFSET + i * PGDIR_SIZE);
+ if (!pud) {
+ WARN_ON(1);
+ break;
+ }
+ new_pgd = __pgd(_KERNPG_TABLE |__pa(pud));
+ /*
+ * Make sure not to stomp on some other pgd entry.
+ */
+ if (!pgd_none(pgd[i])) {
+ WARN_ON(1);
+ continue;
+ }
+ set_pgd(pgd + i, new_pgd);
}
}
+#define kaiser_add_user_map_early(start, size, flags) do { \
+ int __ret = kaiser_add_user_map(start, size, flags); \
+ WARN_ON(__ret); \
+} while (0)
+
+#define kaiser_add_user_map_ptrs_early(start, end, flags) do { \
+ int __ret = kaiser_add_user_map_ptrs(start, end, flags); \
+ WARN_ON(__ret); \
+} while (0)
+
extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
-spinlock_t shadow_table_lock;
+/*
+ * If anything in here fails, we will likely die on one of the
+ * first kernel->user transitions and init will die. But, we
+ * will have most of the kernel up by then and should be able to
+ * get a clean warning out of it. If we BUG_ON() here, we run
+ * the risk of being before we have good console output.
+ */
void __init kaiser_init(void)
{
int cpu;
- spin_lock_init(&shadow_table_lock);
-
- spin_lock(&shadow_table_lock);
- _kaiser_init();
+ kaiser_init_all_pgds();
for_each_possible_cpu(cpu) {
- // map the per cpu user variables
- _kaiser_copy(
- (unsigned long) (__per_cpu_user_mapped_start + per_cpu_offset(cpu)),
- (unsigned long) __per_cpu_user_mapped_end - (unsigned long) __per_cpu_user_mapped_start,
- __PAGE_KERNEL);
- }
-
- // map the entry/exit text section, which is responsible to switch between user- and kernel mode
- _kaiser_copy(
- (unsigned long) __entry_text_start,
- (unsigned long) __entry_text_end - (unsigned long) __entry_text_start,
- __PAGE_KERNEL_RX);
-
- // the fixed map address of the idt_table
- _kaiser_copy(
- (unsigned long) idt_descr.address,
- sizeof(gate_desc) * NR_VECTORS,
- __PAGE_KERNEL_RO);
+ void *percpu_vaddr = __per_cpu_user_mapped_start +
+ per_cpu_offset(cpu);
+ unsigned long percpu_sz = __per_cpu_user_mapped_end -
+ __per_cpu_user_mapped_start;
+ kaiser_add_user_map_early(percpu_vaddr, percpu_sz,
+ __PAGE_KERNEL);
+ }
- spin_unlock(&shadow_table_lock);
+ /*
+ * Map the entry/exit text section, which is needed at
+ * switches from user to and from kernel.
+ */
+ kaiser_add_user_map_ptrs_early(__entry_text_start, __entry_text_end,
+ __PAGE_KERNEL_RX);
+
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+ kaiser_add_user_map_ptrs_early(__irqentry_text_start,
+ __irqentry_text_end,
+ __PAGE_KERNEL_RX);
+#endif
+ kaiser_add_user_map_early((void *)idt_descr.address,
+ sizeof(gate_desc) * NR_VECTORS,
+ __PAGE_KERNEL_RO);
+#ifdef CONFIG_TRACING
+ kaiser_add_user_map_early(&trace_idt_descr,
+ sizeof(trace_idt_descr),
+ __PAGE_KERNEL);
+ kaiser_add_user_map_early(&trace_idt_table,
+ sizeof(gate_desc) * NR_VECTORS,
+ __PAGE_KERNEL);
+#endif
+ kaiser_add_user_map_early(&debug_idt_descr, sizeof(debug_idt_descr),
+ __PAGE_KERNEL);
+ kaiser_add_user_map_early(&debug_idt_table,
+ sizeof(gate_desc) * NR_VECTORS,
+ __PAGE_KERNEL);
}
+extern void unmap_pud_range_nofree(pgd_t *pgd, unsigned long start, unsigned long end);
// add a mapping to the shadow-mapping, and synchronize the mappings
-void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
+int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
{
- spin_lock(&shadow_table_lock);
- _kaiser_copy(addr, size, flags);
- spin_unlock(&shadow_table_lock);
+ return kaiser_add_user_map((const void *)addr, size, flags);
}
-extern void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end);
void kaiser_remove_mapping(unsigned long start, unsigned long size)
{
- pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(start));
- spin_lock(&shadow_table_lock);
- do {
- unmap_pud_range(pgd, start, start + size);
- } while (pgd++ != native_get_shadow_pgd(pgd_offset_k(start + size)));
- spin_unlock(&shadow_table_lock);
+ unsigned long end = start + size;
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PGDIR_SIZE) {
+ pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(addr));
+ /*
+ * unmap_p4d_range() handles > P4D_SIZE unmaps,
+ * so no need to trim 'end'.
+ */
+ unmap_pud_range_nofree(pgd, addr, end);
+ }
}
#endif /* CONFIG_KAISER */
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -52,6 +52,7 @@ static DEFINE_SPINLOCK(cpa_lock);
#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
+#define CPA_FREE_PAGETABLES 8
#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];
@@ -729,10 +730,13 @@ static int split_large_page(struct cpa_d
return 0;
}
-static bool try_to_free_pte_page(pte_t *pte)
+static bool try_to_free_pte_page(struct cpa_data *cpa, pte_t *pte)
{
int i;
+ if (!(cpa->flags & CPA_FREE_PAGETABLES))
+ return false;
+
for (i = 0; i < PTRS_PER_PTE; i++)
if (!pte_none(pte[i]))
return false;
@@ -741,10 +745,13 @@ static bool try_to_free_pte_page(pte_t *
return true;
}
-static bool try_to_free_pmd_page(pmd_t *pmd)
+static bool try_to_free_pmd_page(struct cpa_data *cpa, pmd_t *pmd)
{
int i;
+ if (!(cpa->flags & CPA_FREE_PAGETABLES))
+ return false;
+
for (i = 0; i < PTRS_PER_PMD; i++)
if (!pmd_none(pmd[i]))
return false;
@@ -753,7 +760,9 @@ static bool try_to_free_pmd_page(pmd_t *
return true;
}
-static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+static bool unmap_pte_range(struct cpa_data *cpa, pmd_t *pmd,
+ unsigned long start,
+ unsigned long end)
{
pte_t *pte = pte_offset_kernel(pmd, start);
@@ -764,22 +773,23 @@ static bool unmap_pte_range(pmd_t *pmd,
pte++;
}
- if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+ if (try_to_free_pte_page(cpa, (pte_t *)pmd_page_vaddr(*pmd))) {
pmd_clear(pmd);
return true;
}
return false;
}
-static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+static void __unmap_pmd_range(struct cpa_data *cpa, pud_t *pud, pmd_t *pmd,
unsigned long start, unsigned long end)
{
- if (unmap_pte_range(pmd, start, end))
- if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ if (unmap_pte_range(cpa, pmd, start, end))
+ if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud)))
pud_clear(pud);
}
-static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+static void unmap_pmd_range(struct cpa_data *cpa, pud_t *pud,
+ unsigned long start, unsigned long end)
{
pmd_t *pmd = pmd_offset(pud, start);
@@ -790,7 +800,7 @@ static void unmap_pmd_range(pud_t *pud,
unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
unsigned long pre_end = min_t(unsigned long, end, next_page);
- __unmap_pmd_range(pud, pmd, start, pre_end);
+ __unmap_pmd_range(cpa, pud, pmd, start, pre_end);
start = pre_end;
pmd++;
@@ -803,7 +813,8 @@ static void unmap_pmd_range(pud_t *pud,
if (pmd_large(*pmd))
pmd_clear(pmd);
else
- __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+ __unmap_pmd_range(cpa, pud, pmd,
+ start, start + PMD_SIZE);
start += PMD_SIZE;
pmd++;
@@ -813,17 +824,19 @@ static void unmap_pmd_range(pud_t *pud,
* 4K leftovers?
*/
if (start < end)
- return __unmap_pmd_range(pud, pmd, start, end);
+ return __unmap_pmd_range(cpa, pud, pmd, start, end);
/*
* Try again to free the PMD page if haven't succeeded above.
*/
if (!pud_none(*pud))
- if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ if (try_to_free_pmd_page(cpa, (pmd_t *)pud_page_vaddr(*pud)))
pud_clear(pud);
}
-void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+static void __unmap_pud_range(struct cpa_data *cpa, pgd_t *pgd,
+ unsigned long start,
+ unsigned long end)
{
pud_t *pud = pud_offset(pgd, start);
@@ -834,7 +847,7 @@ void unmap_pud_range(pgd_t *pgd, unsigne
unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
unsigned long pre_end = min_t(unsigned long, end, next_page);
- unmap_pmd_range(pud, start, pre_end);
+ unmap_pmd_range(cpa, pud, start, pre_end);
start = pre_end;
pud++;
@@ -848,7 +861,7 @@ void unmap_pud_range(pgd_t *pgd, unsigne
if (pud_large(*pud))
pud_clear(pud);
else
- unmap_pmd_range(pud, start, start + PUD_SIZE);
+ unmap_pmd_range(cpa, pud, start, start + PUD_SIZE);
start += PUD_SIZE;
pud++;
@@ -858,7 +871,7 @@ void unmap_pud_range(pgd_t *pgd, unsigne
* 2M leftovers?
*/
if (start < end)
- unmap_pmd_range(pud, start, end);
+ unmap_pmd_range(cpa, pud, start, end);
/*
* No need to try to free the PUD page because we'll free it in
@@ -866,6 +879,24 @@ void unmap_pud_range(pgd_t *pgd, unsigne
*/
}
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+ struct cpa_data cpa = {
+ .flags = CPA_FREE_PAGETABLES,
+ };
+
+ __unmap_pud_range(&cpa, pgd, start, end);
+}
+
+void unmap_pud_range_nofree(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+ struct cpa_data cpa = {
+ .flags = 0,
+ };
+
+ __unmap_pud_range(&cpa, pgd, start, end);
+}
+
static int alloc_pte_page(pmd_t *pmd)
{
pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -344,40 +344,26 @@ static inline void _pgd_free(pgd_t *pgd)
kmem_cache_free(pgd_cache, pgd);
}
#else
-static inline pgd_t *_pgd_alloc(void)
-{
-#ifdef CONFIG_KAISER
- // Instead of one PML4, we aquire two PML4s and, thus, an 8kb-aligned memory
- // block. Therefore, we have to allocate at least 3 pages. However, the
- // __get_free_pages returns us 4 pages. Hence, we store the base pointer at
- // the beginning of the page of our 8kb-aligned memory block in order to
- // correctly free it afterwars.
- unsigned long pages = __get_free_pages(PGALLOC_GFP, get_order(4*PAGE_SIZE));
-
- if(native_get_normal_pgd((pgd_t*) pages) == (pgd_t*) pages)
- {
- *((unsigned long*)(pages + 2 * PAGE_SIZE)) = pages;
- return (pgd_t *) pages;
- }
- else
- {
- *((unsigned long*)(pages + 3 * PAGE_SIZE)) = pages;
- return (pgd_t *) (pages + PAGE_SIZE);
- }
+#ifdef CONFIG_KAISER
+/*
+ * Instead of one pgd, we acquire two pgds. Being order-1, it is
+ * both 8k in size and 8k-aligned. That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
#else
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+#define PGD_ALLOCATION_ORDER 0
#endif
+
+static inline pgd_t *_pgd_alloc(void)
+{
+ return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}
static inline void _pgd_free(pgd_t *pgd)
{
-#ifdef CONFIG_KAISER
- unsigned long pages = *((unsigned long*) ((char*) pgd + 2 * PAGE_SIZE));
- free_pages(pages, get_order(4*PAGE_SIZE));
-#else
- free_page((unsigned long)pgd);
-#endif
+ free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */
--- /dev/null
+++ b/include/linux/kaiser.h
@@ -0,0 +1,26 @@
+#ifndef _INCLUDE_KAISER_H
+#define _INCLUDE_KAISER_H
+
+#ifdef CONFIG_KAISER
+#include <asm/kaiser.h>
+#else
+
+/*
+ * These stubs are used whenever CONFIG_KAISER is off, which
+ * includes architectures that support KAISER, but have it
+ * disabled.
+ */
+
+static inline void kaiser_init(void)
+{
+}
+static inline void kaiser_remove_mapping(unsigned long start, unsigned long size)
+{
+}
+static inline int kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_KAISER */
+#endif /* _INCLUDE_KAISER_H */
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -58,6 +58,7 @@
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
+#include <linux/kaiser.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
@@ -472,7 +473,6 @@ void set_task_stack_end_magic(struct tas
*stackend = STACK_END_MAGIC; /* for overflow detection */
}
-extern void kaiser_add_mapping(unsigned long addr, unsigned long size, unsigned long flags);
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
@@ -500,9 +500,10 @@ static struct task_struct *dup_task_stru
* functions again.
*/
tsk->stack = stack;
-#ifdef CONFIG_KAISER
- kaiser_add_mapping((unsigned long)tsk->stack, THREAD_SIZE, __PAGE_KERNEL);
-#endif
+
+	err = kaiser_add_mapping((unsigned long)tsk->stack, THREAD_SIZE, __PAGE_KERNEL);
+ if (err)
+ goto free_stack;
#ifdef CONFIG_VMAP_STACK
tsk->stack_vm_area = stack_vm_area;
#endif
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -32,12 +32,17 @@ config SECURITY
If you are unsure how to answer this question, answer N.
config KAISER
bool "Remove the kernel mapping in user mode"
+ default y
depends on X86_64
depends on !PARAVIRT
help
This enforces a strict kernel and user space isolation in order to close
hardware side channels on kernel address information.
+config KAISER_REAL_SWITCH
+ bool "KAISER: actually switch page tables"
+ default y
+
config SECURITYFS
bool "Enable the securityfs filesystem"
help
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.9/x86-paravirt-dont-patch-flush_tlb_single.patch
queue-4.9/kaiser-merged-update.patch
This is a note to let you know that I've just added the patch titled
mfd: cros ec: spi: Don't send first message too soon
to the 3.18-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
mfd-cros-ec-spi-don-t-send-first-message-too-soon.patch
and it can be found in the queue-3.18 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 15d8374874ded0bec37ef27f8301a6d54032c0e5 Mon Sep 17 00:00:00 2001
From: Jon Hunter <jonathanh(a)nvidia.com>
Date: Tue, 14 Nov 2017 14:43:27 +0000
Subject: mfd: cros ec: spi: Don't send first message too soon
From: Jon Hunter <jonathanh(a)nvidia.com>
commit 15d8374874ded0bec37ef27f8301a6d54032c0e5 upstream.
On the Tegra124 Nyan-Big chromebook the very first SPI message sent to
the EC is failing.
The Tegra SPI driver configures the SPI chip-selects to be active-high
by default (and always has for many years). The EC SPI requires an
active-low chip-select and so the Tegra chip-select is reconfigured to
be active-low when the EC SPI driver calls spi_setup(). The problem is
that if the first SPI message to the EC is sent too soon after
reconfiguring the SPI chip-select, it fails.
The EC SPI driver prevents back-to-back SPI messages being sent too
soon by keeping track of the time the last transfer was sent via the
variable 'last_transfer_ns'. To prevent the very first transfer being
sent too soon, initialise the 'last_transfer_ns' variable after calling
spi_setup() and before sending the first SPI message.
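For clarity, a sketch of where that initialisation lands in the probe
path (surrounding calls are assumed from the upstream commit, not
quoted from the 3.18 backport itself):

	err = spi_setup(spi);
	if (err < 0)
		return err;

	/*
	 * Sketch: pretend a transfer just finished, so that the first
	 * real message honours the usual cool-down period after the
	 * chip-select polarity has been reconfigured.
	 */
	ec_spi->last_transfer_ns = ktime_get_ns();

	err = cros_ec_register(ec_dev);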
Signed-off-by: Jon Hunter <jonathanh(a)nvidia.com>
Reviewed-by: Brian Norris <briannorris(a)chromium.org>
Reviewed-by: Douglas Anderson <dianders(a)chromium.org>
Acked-by: Benson Leung <bleung(a)chromium.org>
Signed-off-by: Lee Jones <lee.jones(a)linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/mfd/cros_ec_spi.c | 1 +
1 file changed, 1 insertion(+)
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
[The hunk context was mangled in extraction. Per the changelog above, the
single inserted line lands in the probe path, after the spi_setup() call:]
+	ec_spi->last_transfer_ns = ktime_get_ns();
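For orientation, a hedged sketch of where that initialisation sits in the
probe path (function layout abbreviated; the exact surrounding code in the
3.18 tree may differ):

	static int cros_ec_spi_probe(struct spi_device *spi)
	{
		struct cros_ec_spi *ec_spi;
		int err;
		...
		/* spi_setup() flips the Tegra chip-select to active-low */
		err = spi_setup(spi);
		if (err < 0)
			return err;

		/*
		 * Record "now" as the last transfer time, so the driver's
		 * existing inter-transfer delay also guards the very first
		 * message sent after the chip-select reconfiguration.
		 */
		ec_spi->last_transfer_ns = ktime_get_ns();
		...
	}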
Patches currently in stable-queue which might be from jonathanh(a)nvidia.com are
queue-3.18/mfd-cros-ec-spi-don-t-send-first-message-too-soon.patch
On 03/01/18 03:11, kernelci.org bot wrote:
> stable/linux-4.14.y boot: 108 boots: 0 failed, 107 passed with 1 conflict (v4.14.11)
>
> Full Boot Summary: https://kernelci.org/boot/all/job/stable/branch/linux-4.14.y/kernel/v4.14.1…
> Full Build Summary: https://kernelci.org/build/stable/branch/linux-4.14.y/kernel/v4.14.11/
>
> Tree: stable
> Branch: linux-4.14.y
> Git Describe: v4.14.11
> Git Commit: 0d59679df5b53755c00ea0292df696f97bfc950d
> Git URL: http://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
> Tested: 59 unique boards, 23 SoC families, 16 builds out of 185
>
> Boot Regressions Detected:
>
> x86:
>
> x86_64_defconfig:
> qemu:
> lab-mhart: new failure (last pass: v4.14.10)
>
> Conflicting Boot Failure Detected: (These likely are not failures as other labs are reporting PASS. Needs review.)
>
> x86:
>
> x86_64_defconfig:
> qemu:
> lab-mhart: FAIL
> lab-collabora: PASS
Well, it turns out this is not exactly a conflict as there's a
subtle difference between the qemu devices in lab-mhart and in
lab-collabora. The ones in lab-collabora are configured to use
KVM, and it looks like the ones in lab-mhart aren't.
So this job with KVM enabled passes in lab-collabora:
https://lava.collabora.co.uk/scheduler/job/1032358
but it fails if I tell LAVA (qemu) to disable KVM:
https://lava.collabora.co.uk/scheduler/job/1032359
with the same panic as in lab-mhart. It seems like it's failing
to return from an interrupt:
http://lava.streamtester.net/scheduler/job/87308
[ 2.678828] ? native_iret+0x7/0x7
[ 2.679208] WARNING: can't dereference iret registers at 00000000ffc66068 for ip page_fault+0x11/0x60
This triggered an automated bisection on kernelci.org; please see
the results below.
I may run another bisection with this config enabled earlier in
the history, to track down the actual change in the code that
introduced the issue; let me know whether it's worth doing.
Hope this helps!
Best wishes,
Guillaume
--8<-----------------------------------------------------------------------8<--
Bisection result for stable/linux-4.14.y (v4.14.11) on qemu
Good known revision:
b8ce823 Linux 4.14.10
Bad known revision:
0d59679 Linux 4.14.11
Extra parameters:
Tree: stable
URL: http://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
Branch: linux-4.14.y
Target: qemu
Lab: lab-mhart
Defconfig: x86_64_defconfig
Plan: boot
Breaking commit found:
-------------------------------------------------------------------------------
commit 3dfd9fd8d897214b1a880c7fd8ed36b88faa1c02
Author: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Mon Dec 4 15:08:03 2017 +0100
x86/mm/pti: Add Kconfig
commit 385ce0ea4c078517fa51c261882c4e72fba53005 upstream.
Finally allow CONFIG_PAGE_TABLE_ISOLATION to be enabled.
PARAVIRT generally requires that the kernel not manage its own page tables.
It also means that the hypervisor and kernel must agree wholeheartedly
about what format the page tables are in and what they contain.
PAGE_TABLE_ISOLATION, unfortunately, changes the rules and they
can not be used together.
I've seen conflicting feedback from maintainers lately about whether they
want the Kconfig magic to go first or last in a patch series. It's going
last here because the partially-applied series leads to kernels that can
not boot in a bunch of cases. I did a run through the entire series with
CONFIG_PAGE_TABLE_ISOLATION=y to look for build errors, though.
[ tglx: Removed SMP and !PARAVIRT dependencies as they no longer exist ]
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Andy Lutomirski <luto(a)kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky(a)oracle.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Brian Gerst <brgerst(a)gmail.com>
Cc: David Laight <David.Laight(a)aculab.com>
Cc: Denys Vlasenko <dvlasenk(a)redhat.com>
Cc: Eduardo Valentin <eduval(a)amazon.com>
Cc: Greg KH <gregkh(a)linuxfoundation.org>
Cc: H. Peter Anvin <hpa(a)zytor.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: Juergen Gross <jgross(a)suse.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: aliguori(a)amazon.com
Cc: daniel.gruss(a)iaik.tugraz.at
Cc: hughd(a)google.com
Cc: keescook(a)google.com
Cc: linux-mm(a)kvack.org
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
diff --git a/security/Kconfig b/security/Kconfig
index e8e4494..6614b93 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -54,6 +54,17 @@ config SECURITY_NETWORK
implement socket and networking access controls.
If you are unsure how to answer this question, answer N.
+config PAGE_TABLE_ISOLATION
+ bool "Remove the kernel mapping in user mode"
+ depends on X86_64 && !UML
+ default y
+ help
+ This feature reduces the number of hardware side channels by
+ ensuring that the majority of kernel addresses are not mapped
+ into userspace.
+
+ See Documentation/x86/pagetable-isolation.txt for more details.
+
config SECURITY_INFINIBAND
bool "Infiniband Security Hooks"
depends on SECURITY && INFINIBAND
-------------------------------------------------------------------------------
Git bisection log:
-------------------------------------------------------------------------------
git bisect start
# good: [b8ce8232fcc37fe7a97db79ea0a5f32098c25e72] Linux 4.14.10
git bisect good b8ce8232fcc37fe7a97db79ea0a5f32098c25e72
# bad: [0d59679df5b53755c00ea0292df696f97bfc950d] Linux 4.14.11
git bisect bad 0d59679df5b53755c00ea0292df696f97bfc950d
# bad: [621b5ae0f9f4f9ef91bf441afc086ecf5e752d51] s390/qeth: apply takeover changes when mode is toggled
git bisect bad 621b5ae0f9f4f9ef91bf441afc086ecf5e752d51
# bad: [66f833dbed02d39c44440b6b35ac088655c32edb] ring-buffer: Mask out the info bits when returning buffer page length
git bisect bad 66f833dbed02d39c44440b6b35ac088655c32edb
# good: [d230c1917f57c3beee2e0204a4c8c58999758b95] x86/mm/pti: Map ESPFIX into user space
git bisect good d230c1917f57c3beee2e0204a4c8c58999758b95
# good: [36a72ab52c8d969a7a302082f52731c1be0e9ada] x86/mm: Optimize RESTORE_CR3
git bisect good 36a72ab52c8d969a7a302082f52731c1be0e9ada
# bad: [3dfd9fd8d897214b1a880c7fd8ed36b88faa1c02] x86/mm/pti: Add Kconfig
git bisect bad 3dfd9fd8d897214b1a880c7fd8ed36b88faa1c02
# good: [ef4b38472d6b1bf587554dfc7d5ab7abc835c1a5] x86/mm: Clarify the whole ASID/kernel PCID/user PCID naming
git bisect good ef4b38472d6b1bf587554dfc7d5ab7abc835c1a5
# good: [33d9d7836f0fa02777667d72bc815c12fbe61cac] x86/dumpstack: Indicate in Oops whether PTI is configured and enabled
git bisect good 33d9d7836f0fa02777667d72bc815c12fbe61cac
# first bad commit: [3dfd9fd8d897214b1a880c7fd8ed36b88faa1c02] x86/mm/pti: Add Kconfig
-------------------------------------------------------------------------------
This is a note to let you know that I've just added the patch titled
KPTI: Report when enabled
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kpti-report-when-enabled.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From keescook(a)chromium.org Wed Jan 3 20:49:35 2018
From: Kees Cook <keescook(a)chromium.org>
Date: Wed, 3 Jan 2018 10:43:32 -0800
Subject: KPTI: Report when enabled
To: Greg KH <gregkh(a)linuxfoundation.org>
Message-ID: <20180103184332.GA18888@beast>
Content-Disposition: inline
From: Kees Cook <keescook(a)chromium.org>
Make sure dmesg reports when KPTI is enabled.
Signed-off-by: Kees Cook <keescook(a)chromium.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/kaiser.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -11,6 +11,9 @@
#include <linux/uaccess.h>
#include <linux/ftrace.h>
+#undef pr_fmt
+#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
+
#include <asm/kaiser.h>
#include <asm/tlbflush.h> /* to verify its kaiser declarations */
#include <asm/pgtable.h>
@@ -293,7 +296,7 @@ enable:
return;
disable:
- pr_info("Kernel/User page tables isolation: disabled\n");
+ pr_info("disabled\n");
silent_disable:
kaiser_enabled = 0;
@@ -353,6 +356,8 @@ void __init kaiser_init(void)
kaiser_add_user_map_early(&debug_idt_table,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL);
+
+ pr_info("enabled\n");
}
/* Add a mapping to the shadow mapping, and synchronize the mappings */
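The #undef/#define pair above is the usual kernel trick for prefixing every
pr_*() call in a file. Roughly, as a stand-alone illustration (not part of
the patch itself):

	#undef pr_fmt
	#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt

	#include <linux/printk.h>

	static void report_pti_status(int enabled)
	{
		/*
		 * pr_info("enabled\n") expands to
		 * printk(KERN_INFO "Kernel/User page tables isolation: " "enabled\n"),
		 * so every message in the file carries the prefix automatically.
		 */
		if (enabled)
			pr_info("enabled\n");
		else
			pr_info("disabled\n");
	}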
Patches currently in stable-queue which might be from keescook(a)chromium.org are
queue-4.4/kpti-rename-to-page_table_isolation.patch
queue-4.4/kpti-report-when-enabled.patch
This is a note to let you know that I've just added the patch titled
KPTI: Report when enabled
to the 4.9-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kpti-report-when-enabled.patch
and it can be found in the queue-4.9 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From keescook(a)chromium.org Wed Jan 3 20:48:07 2018
From: Kees Cook <keescook(a)chromium.org>
Date: Wed, 3 Jan 2018 10:18:01 -0800
Subject: KPTI: Report when enabled
To: Greg KH <gregkh(a)linuxfoundation.org>
Message-ID: <20180103181801.GA33383@beast>
Content-Disposition: inline
From: Kees Cook <keescook(a)chromium.org>
Make sure dmesg reports when KPTI is enabled.
Signed-off-by: Kees Cook <keescook(a)chromium.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/kaiser.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -10,6 +10,9 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
+#undef pr_fmt
+#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
+
#include <asm/kaiser.h>
#include <asm/tlbflush.h> /* to verify its kaiser declarations */
#include <asm/pgtable.h>
@@ -292,7 +295,7 @@ enable:
return;
disable:
- pr_info("Kernel/User page tables isolation: disabled\n");
+ pr_info("disabled\n");
silent_disable:
kaiser_enabled = 0;
@@ -352,6 +355,8 @@ void __init kaiser_init(void)
kaiser_add_user_map_early(&debug_idt_table,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL);
+
+ pr_info("enabled\n");
}
/* Add a mapping to the shadow mapping, and synchronize the mappings */
Patches currently in stable-queue which might be from keescook(a)chromium.org are
queue-4.9/kpti-rename-to-page_table_isolation.patch
queue-4.9/kpti-report-when-enabled.patch
This is a note to let you know that I've just added the patch titled
x86/kaiser: Rename and simplify X86_FEATURE_KAISER handling
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-kaiser-rename-and-simplify-x86_feature_kaiser-handling.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
>From foo@baz Wed Jan 3 18:58:12 CET 2018
From: Borislav Petkov <bp(a)suse.de>
Date: Tue, 2 Jan 2018 14:19:48 +0100
Subject: x86/kaiser: Rename and simplify X86_FEATURE_KAISER handling
From: Borislav Petkov <bp(a)suse.de>
Concentrate it in arch/x86/mm/kaiser.c and use the upstream string "nopti".
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
Documentation/kernel-parameters.txt | 2 +-
arch/x86/kernel/cpu/common.c | 18 ------------------
arch/x86/mm/kaiser.c | 20 +++++++++++++++++++-
3 files changed, 20 insertions(+), 20 deletions(-)
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2523,7 +2523,7 @@ bytes respectively. Such letter suffixes
nojitter [IA-64] Disables jitter checking for ITC timers.
- nokaiser [X86-64] Disable KAISER isolation of kernel from user.
+ nopti [X86-64] Disable KAISER isolation of kernel from user.
no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -178,20 +178,6 @@ static int __init x86_pcid_setup(char *s
return 1;
}
__setup("nopcid", x86_pcid_setup);
-
-static int __init x86_nokaiser_setup(char *s)
-{
- /* nokaiser doesn't accept parameters */
- if (s)
- return -EINVAL;
-#ifdef CONFIG_KAISER
- kaiser_enabled = 0;
- setup_clear_cpu_cap(X86_FEATURE_KAISER);
- pr_info("nokaiser: KAISER feature disabled\n");
-#endif
- return 0;
-}
-early_param("nokaiser", x86_nokaiser_setup);
#endif
static int __init x86_noinvpcid_setup(char *s)
@@ -761,10 +747,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_power = cpuid_edx(0x80000007);
init_scattered_cpuid_features(c);
-#ifdef CONFIG_KAISER
- if (kaiser_enabled)
- set_cpu_cap(c, X86_FEATURE_KAISER);
-#endif
}
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -275,8 +275,13 @@ void __init kaiser_init(void)
{
int cpu;
- if (!kaiser_enabled)
+ if (!kaiser_enabled) {
+ setup_clear_cpu_cap(X86_FEATURE_KAISER);
return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_KAISER);
+
kaiser_init_all_pgds();
for_each_possible_cpu(cpu) {
@@ -419,3 +424,16 @@ void kaiser_flush_tlb_on_return_to_user(
X86_CR3_PCID_USER_FLUSH | KAISER_SHADOW_PGD_OFFSET);
}
EXPORT_SYMBOL(kaiser_flush_tlb_on_return_to_user);
+
+static int __init x86_nokaiser_setup(char *s)
+{
+ /* nopti doesn't accept parameters */
+ if (s)
+ return -EINVAL;
+
+ kaiser_enabled = 0;
+ pr_info("Kernel/User page tables isolation: disabled\n");
+
+ return 0;
+}
+early_param("nopti", x86_nokaiser_setup);
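As a quick usage note for anyone testing this backport: with the handler
above, booting with the bare parameter

	nopti

on the kernel command line should disable KAISER and log
"Kernel/User page tables isolation: disabled", while passing an argument
(e.g. "nopti=1") is rejected with -EINVAL, exactly as the `if (s)` check
implements.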
Patches currently in stable-queue which might be from bp(a)suse.de are
queue-4.4/x86-paravirt-dont-patch-flush_tlb_single.patch
queue-4.4/x86-kaiser-reenable-paravirt.patch
queue-4.4/x86-kaiser-rename-and-simplify-x86_feature_kaiser-handling.patch
queue-4.4/x86-kaiser-check-boottime-cmdline-params.patch
queue-4.4/x86-kaiser-move-feature-detection-up.patch