This is a note to let you know that I've just added the patch titled
x86/retpoline: Add initial retpoline support
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-retpoline-add-initial-retpoline-support.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 76b043848fd22dbf7f8bf3a1452f8c70d557b860 Mon Sep 17 00:00:00 2001
From: David Woodhouse <dwmw(a)amazon.co.uk>
Date: Thu, 11 Jan 2018 21:46:25 +0000
Subject: x86/retpoline: Add initial retpoline support
From: David Woodhouse <dwmw(a)amazon.co.uk>
commit 76b043848fd22dbf7f8bf3a1452f8c70d557b860 upstream.
Enable the use of -mindirect-branch=thunk-extern in newer GCC, and provide
the corresponding thunks. Provide assembler macros for invoking the thunks
in the same way that GCC does, from native and inline assembler.
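For illustration (not part of the patch itself): with
-mindirect-branch=thunk-extern, GCC no longer emits the indirect branch
directly but instead calls an external, per-register thunk which the
kernel has to provide, roughly:

	call	*%rax				# plain indirect call
	call	__x86_indirect_thunk_rax	# with -mindirect-branch=thunk-extern

The thunks themselves are added in arch/x86/lib/retpoline.S below.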
This adds X86_FEATURE_RETPOLINE and sets it by default on all CPUs. In
some circumstances, IBRS microcode features may be used instead, and the
retpoline can be disabled.
On AMD CPUs if lfence is serialising, the retpoline can be dramatically
simplified to a simple "lfence; jmp *\reg". A future patch, after it has
been verified that lfence really is serialising in all circumstances, can
enable this by setting the X86_FEATURE_RETPOLINE_AMD feature bit in addition
to X86_FEATURE_RETPOLINE.
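Conceptually (a sketch only, with \reg standing for the register that
holds the branch target; the real macros are in the nospec-branch.h hunk
below), the two variants look like this:

	# generic retpoline (X86_FEATURE_RETPOLINE)
		call	1f
	2:	pause			# speculation is trapped here
		jmp	2b
	1:	mov	\reg, (%rsp)	# overwrite the return address
		ret			# architecturally jumps to *\reg

	# AMD variant (X86_FEATURE_RETPOLINE_AMD), once LFENCE is known
	# to be serialising
		lfence
		jmp	*\reg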
Do not align the retpoline in the altinstr section, because there is no
guarantee that it stays aligned when it's copied over the oldinstr during
alternative patching.
[ Andi Kleen: Rename the macros, add CONFIG_RETPOLINE option, export thunks]
[ tglx: Put actual function CALL/JMP in front of the macros, convert to
symbolic labels ]
[ dwmw2: Convert back to numeric labels, merge objtool fixes ]
Signed-off-by: David Woodhouse <dwmw(a)amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Arjan van de Ven <arjan(a)linux.intel.com>
Acked-by: Ingo Molnar <mingo(a)kernel.org>
Cc: gnomes(a)lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe(a)redhat.com>
Cc: thomas.lendacky(a)amd.com
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Andy Lutomirski <luto(a)amacapital.net>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Kees Cook <keescook(a)google.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-4-git-send-email-dwmw@amazon.co.…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/Kconfig | 13 +++
arch/x86/Makefile | 10 ++
arch/x86/include/asm/asm-prototypes.h | 25 ++++++
arch/x86/include/asm/cpufeatures.h | 2
arch/x86/include/asm/nospec-branch.h | 128 ++++++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/common.c | 4 +
arch/x86/lib/Makefile | 1
arch/x86/lib/retpoline.S | 48 ++++++++++++
8 files changed, 231 insertions(+)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -429,6 +429,19 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
+config RETPOLINE
+ bool "Avoid speculative indirect branches in kernel"
+ default y
+ help
+ Compile kernel with the retpoline compiler options to guard against
+ kernel-to-user data leaks by avoiding speculative indirect
+ branches. Requires a compiler with -mindirect-branch=thunk-extern
+ support for full protection. The kernel may run slower.
+
+ Without compiler support, at least indirect branches in assembler
+ code are eliminated. Since this includes the syscall entry path,
+ it is not entirely pointless.
+
config INTEL_RDT
bool "Intel Resource Director Technology support"
default n
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -235,6 +235,16 @@ KBUILD_CFLAGS += -Wno-sign-compare
#
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+# Avoid indirect branches in kernel to deal with Spectre
+ifdef CONFIG_RETPOLINE
+ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+ ifneq ($(RETPOLINE_CFLAGS),)
+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+ else
+ $(warning CONFIG_RETPOLINE=y, but not supported by the compiler. Toolchain update recommended.)
+ endif
+endif
+
archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -11,7 +11,32 @@
#include <asm/pgtable.h>
#include <asm/special_insns.h>
#include <asm/preempt.h>
+#include <asm/asm.h>
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
#endif
+
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_32
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void);
+#else
+#define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void);
+INDIRECT_THUNK(8)
+INDIRECT_THUNK(9)
+INDIRECT_THUNK(10)
+INDIRECT_THUNK(11)
+INDIRECT_THUNK(12)
+INDIRECT_THUNK(13)
+INDIRECT_THUNK(14)
+INDIRECT_THUNK(15)
+#endif
+INDIRECT_THUNK(ax)
+INDIRECT_THUNK(bx)
+INDIRECT_THUNK(cx)
+INDIRECT_THUNK(dx)
+INDIRECT_THUNK(si)
+INDIRECT_THUNK(di)
+INDIRECT_THUNK(bp)
+INDIRECT_THUNK(sp)
+#endif /* CONFIG_RETPOLINE */
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -203,6 +203,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */
--- /dev/null
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __NOSPEC_BRANCH_H__
+#define __NOSPEC_BRANCH_H__
+
+#include <asm/alternative.h>
+#include <asm/alternative-asm.h>
+#include <asm/cpufeatures.h>
+
+#ifdef __ASSEMBLY__
+
+/*
+ * This should be used immediately before a retpoline alternative. It tells
+ * objtool where the retpolines are so that it can make sense of the control
+ * flow by just reading the original instruction(s) and ignoring the
+ * alternatives.
+ */
+.macro ANNOTATE_NOSPEC_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.nospec
+ .long .Lannotate_\@ - .
+ .popsection
+.endm
+
+/*
+ * These are the bare retpoline primitives for indirect jmp and call.
+ * Do not use these directly; they only exist to make the ALTERNATIVE
+ * invocation below less ugly.
+ */
+.macro RETPOLINE_JMP reg:req
+ call .Ldo_rop_\@
+.Lspec_trap_\@:
+ pause
+ jmp .Lspec_trap_\@
+.Ldo_rop_\@:
+ mov \reg, (%_ASM_SP)
+ ret
+.endm
+
+/*
+ * This is a wrapper around RETPOLINE_JMP so the called function in reg
+ * returns to the instruction after the macro.
+ */
+.macro RETPOLINE_CALL reg:req
+ jmp .Ldo_call_\@
+.Ldo_retpoline_jmp_\@:
+ RETPOLINE_JMP \reg
+.Ldo_call_\@:
+ call .Ldo_retpoline_jmp_\@
+.endm
+
+/*
+ * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
+ * indirect jmp/call which may be susceptible to the Spectre variant 2
+ * attack.
+ */
+.macro JMP_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE_2 __stringify(jmp *\reg), \
+ __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \
+ __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+ jmp *\reg
+#endif
+.endm
+
+.macro CALL_NOSPEC reg:req
+#ifdef CONFIG_RETPOLINE
+ ANNOTATE_NOSPEC_ALTERNATIVE
+ ALTERNATIVE_2 __stringify(call *\reg), \
+ __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
+ __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD
+#else
+ call *\reg
+#endif
+.endm
+
+#else /* __ASSEMBLY__ */
+
+#define ANNOTATE_NOSPEC_ALTERNATIVE \
+ "999:\n\t" \
+ ".pushsection .discard.nospec\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
+
+#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+
+/*
+ * Since the inline asm uses the %V modifier which is only in newer GCC,
+ * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ */
+# define CALL_NOSPEC \
+ ANNOTATE_NOSPEC_ALTERNATIVE \
+ ALTERNATIVE( \
+ "call *%[thunk_target]\n", \
+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
+ X86_FEATURE_RETPOLINE)
+# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
+
+#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+/*
+ * For i386 we use the original ret-equivalent retpoline, because
+ * otherwise we'll run out of registers. We don't care about CET
+ * here, anyway.
+ */
+# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
+ " jmp 904f;\n" \
+ " .align 16\n" \
+ "901: call 903f;\n" \
+ "902: pause;\n" \
+ " jmp 902b;\n" \
+ " .align 16\n" \
+ "903: addl $4, %%esp;\n" \
+ " pushl %[thunk_target];\n" \
+ " ret;\n" \
+ " .align 16\n" \
+ "904: call 901b;\n", \
+ X86_FEATURE_RETPOLINE)
+
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#else /* No retpoline */
+# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* __NOSPEC_BRANCH_H__ */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -905,6 +905,10 @@ static void __init early_identify_cpu(st
setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+#ifdef CONFIG_RETPOLINE
+ setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+#endif
+
fpu__init_system(c);
#ifdef CONFIG_X86_32
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -26,6 +26,7 @@ lib-y += memcpy_$(BITS).o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_RETPOLINE) += retpoline.o
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
--- /dev/null
+++ b/arch/x86/lib/retpoline.S
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/stringify.h>
+#include <linux/linkage.h>
+#include <asm/dwarf2.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+#include <asm/export.h>
+#include <asm/nospec-branch.h>
+
+.macro THUNK reg
+ .section .text.__x86.indirect_thunk.\reg
+
+ENTRY(__x86_indirect_thunk_\reg)
+ CFI_STARTPROC
+ JMP_NOSPEC %\reg
+ CFI_ENDPROC
+ENDPROC(__x86_indirect_thunk_\reg)
+.endm
+
+/*
+ * Despite being an assembler file we can't just use .irp here
+ * because __KSYM_DEPS__ only uses the C preprocessor and would
+ * only see one instance of "__x86_indirect_thunk_\reg" rather
+ * than one per register with the correct names. So we do it
+ * the simple and nasty way...
+ */
+#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
+
+GENERATE_THUNK(_ASM_AX)
+GENERATE_THUNK(_ASM_BX)
+GENERATE_THUNK(_ASM_CX)
+GENERATE_THUNK(_ASM_DX)
+GENERATE_THUNK(_ASM_SI)
+GENERATE_THUNK(_ASM_DI)
+GENERATE_THUNK(_ASM_BP)
+GENERATE_THUNK(_ASM_SP)
+#ifdef CONFIG_64BIT
+GENERATE_THUNK(r8)
+GENERATE_THUNK(r9)
+GENERATE_THUNK(r10)
+GENERATE_THUNK(r11)
+GENERATE_THUNK(r12)
+GENERATE_THUNK(r13)
+GENERATE_THUNK(r14)
+GENERATE_THUNK(r15)
+#endif
Patches currently in stable-queue which might be from dwmw(a)amazon.co.uk are
queue-4.14/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.14/objtool-detect-jumps-to-retpoline-thunks.patch
queue-4.14/x86-cpufeatures-add-x86_bug_spectre_v.patch
queue-4.14/x86-mm-pti-remove-dead-logic-in-pti_user_pagetable_walk.patch
queue-4.14/sysfs-cpu-fix-typos-in-vulnerability-documentation.patch
queue-4.14/x86-cpu-amd-make-lfence-a-serializing-instruction.patch
queue-4.14/objtool-allow-alternatives-to-be-ignored.patch
queue-4.14/x86-cpu-implement-cpu-vulnerabilites-sysfs-functions.patch
queue-4.14/x86-tboot-unbreak-tboot-with-pti-enabled.patch
queue-4.14/x86-pti-make-unpoison-of-pgd-for-trusted-boot-work-for-real.patch
queue-4.14/sysfs-cpu-add-vulnerability-folder.patch
queue-4.14/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
queue-4.14/x86-retpoline-add-initial-retpoline-support.patch
This is a note to let you know that I've just added the patch titled
x86/pti: Make unpoison of pgd for trusted boot work for real
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-pti-make-unpoison-of-pgd-for-trusted-boot-work-for-real.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 445b69e3b75e42362a5bdc13c8b8f61599e2228a Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Wed, 10 Jan 2018 14:49:39 -0800
Subject: x86/pti: Make unpoison of pgd for trusted boot work for real
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit 445b69e3b75e42362a5bdc13c8b8f61599e2228a upstream.
The initial fix for trusted boot and PTI potentially misses the pgd clearing
if pud_alloc() sets a PGD. It probably works in *practice* because for two
adjacent calls to map_tboot_page() that share a PGD entry, the first will
clear NX, *then* allocate and set the PGD (without NX clear). The second
call will *not* allocate but will clear the NX bit.
Defer the NX clearing to a point after it is known that all top-level
allocations have occurred. Add a comment to clarify why.
[ tglx: Massaged changelog ]
Fixes: 262b6b30087 ("x86/tboot: Unbreak tboot with PTI enabled")
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: Jon Masters <jcm(a)redhat.com>
Cc: "Tim Chen" <tim.c.chen(a)linux.intel.com>
Cc: gnomes(a)lxorguk.ukuu.org.uk
Cc: peterz(a)infradead.org
Cc: ning.sun(a)intel.com
Cc: tboot-devel(a)lists.sourceforge.net
Cc: andi(a)firstfloor.org
Cc: luto(a)kernel.org
Cc: law(a)redhat.com
Cc: pbonzini(a)redhat.com
Cc: torvalds(a)linux-foundation.org
Cc: gregkh(a)linux-foundation.org
Cc: dwmw(a)amazon.co.uk
Cc: nickc(a)redhat.com
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20180110224939.2695CD47@viggo.jf.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/tboot.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -127,7 +127,6 @@ static int map_tboot_page(unsigned long
p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
if (!p4d)
return -1;
- pgd->pgd &= ~_PAGE_NX;
pud = pud_alloc(&tboot_mm, p4d, vaddr);
if (!pud)
return -1;
@@ -139,6 +138,17 @@ static int map_tboot_page(unsigned long
return -1;
set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
pte_unmap(pte);
+
+ /*
+ * PTI poisons low addresses in the kernel page tables in the
+ * name of making them unusable for userspace. To execute
+ * code at such a low address, the poison must be cleared.
+ *
+ * Note: 'pgd' actually gets set in p4d_alloc() _or_
+ * pud_alloc() depending on 4/5-level paging.
+ */
+ pgd->pgd &= ~_PAGE_NX;
+
return 0;
}
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-pti-unbreak-efi-old_memmap.patch
queue-4.14/x86-documentation-add-pti-description.patch
queue-4.14/x86-tboot-unbreak-tboot-with-pti-enabled.patch
queue-4.14/x86-pti-make-unpoison-of-pgd-for-trusted-boot-work-for-real.patch
From: Vincent Wang <vincent.wang(a)spreadtrum.com>
list_del_rcu() should be used instead of list_del() in the function
_remove_list_dev(), since the opp is an RCU-protected pointer.
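As a reminder (a simplified sketch of the 4.4-era helpers from
include/linux/list.h and include/linux/rculist.h, not part of this
patch): list_del() poisons both pointers of the removed entry, which is
where the dead000000000110 fault address in the oops below comes from
(LIST_POISON1 plus a small member offset), while list_del_rcu() leaves
->next intact so that RCU readers still traversing the list can get past
the removed entry; the entry is only freed after a grace period, here via
call_srcu().

	static inline void list_del(struct list_head *entry)
	{
		__list_del_entry(entry);
		entry->next = LIST_POISON1;	/* 0xdead000000000100 + poison delta */
		entry->prev = LIST_POISON2;
	}

	static inline void list_del_rcu(struct list_head *entry)
	{
		__list_del_entry(entry);
		entry->prev = LIST_POISON2;	/* ->next stays valid for readers */
	}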
For example, on a Spreadtrum ARM big.LITTLE platform, the little cluster,
the big cluster and the GPU all use PM OPP. The opp_table of the big
cluster is removed when the big cluster is removed, which is implemented
in the cpufreq driver. Sometimes the following issue occurs:
[ 237.647758] c0 Unable to handle kernel paging request at virtual address dead000000000110
[ 237.647776] c0 pgd = ffffffc073e78000
[ 237.647786] c0 [dead000000000110] *pgd=0000000000000000, *pud=0000000000000000
[ 237.647808] c0 Internal error: Oops: 96000004 [#1] PREEMPT SMP
[ 237.653535] c0 Modules linked in: sprdwl_ng(O) mtty marlin2_fm mali_kbase(O)
[ 237.653569] c0 CPU: 0 PID: 38 Comm: kworker/u12:1 Tainted: G S W O 4.4.83+ #1
[ 237.653578] c0 Hardware name: Spreadtrum SP9850KHsmt 1h10 Board (DT)
[ 237.653594] c0 Workqueue: devfreq_wq devfreq_monitor
[ 237.653605] c0 task: ffffffc0babd0d80 task.stack: ffffffc0badbc000
[ 237.653619] c0 PC is at _find_device_opp+0x58/0xac
[ 237.653629] c0 LR is at dev_pm_opp_find_freq_ceil+0x2c/0xb8
[ 237.921294] c0 Call trace:
[ 237.921425] c0 [<ffffff80085362b0>] _find_device_opp+0x58/0xac
[ 237.921437] c0 [<ffffff8008536560>] dev_pm_opp_find_freq_ceil+0x2c/0xb8
[ 237.921452] c0 [<ffffff80088760f4>] devfreq_recommended_opp+0x54/0x7c
[ 237.921494] c0 [<ffffff8000b6a96c>] kbase_wait_write_flush+0x164/0x358 [mali_kbase]
[ 237.921504] c0 [<ffffff800887485c>] update_devfreq+0x8c/0xf8
[ 237.921514] c0 [<ffffff80088749e4>] devfreq_monitor+0x34/0x94
[ 237.921529] c0 [<ffffff80080bd75c>] process_one_work+0x154/0x458
[ 237.921539] c0 [<ffffff80080be428>] worker_thread+0x134/0x4a4
[ 237.921551] c0 [<ffffff80080c4bec>] kthread+0xdc/0xf0
[ 237.921564] c0 [<ffffff8008085f20>] ret_from_fork+0x10/0x30
Cc: stable <stable(a)vger.kernel.org> # 4.4
Signed-off-by: Vincent Wang <vincent.wang(a)spreadtrum.com>
Signed-off-by: Chunyan Zhang <chunyan.zhang(a)spreadtrum.com>
Acked-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
This patch is for the 4.4 stable branch only.
Once this patch is accepted, I can cook a similar patch for the 4.9 stable branch.
This fix can't be applied to the upstream kernel, as the OPP code doesn't
use RCU anymore.
---
drivers/base/power/opp/core.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index db6e7e5..e0eac53 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -463,7 +463,7 @@ static void _kfree_list_dev_rcu(struct rcu_head *head)
static void _remove_list_dev(struct device_list_opp *list_dev,
struct device_opp *dev_opp)
{
- list_del(&list_dev->node);
+ list_del_rcu(&list_dev->node);
call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
_kfree_list_dev_rcu);
}
--
1.9.1
This is a note to let you know that I've just added the patch titled
objtool: Detect jumps to retpoline thunks
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
objtool-detect-jumps-to-retpoline-thunks.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 39b735332cb8b33a27c28592d969e4016c86c3ea Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
Date: Thu, 11 Jan 2018 21:46:23 +0000
Subject: objtool: Detect jumps to retpoline thunks
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
commit 39b735332cb8b33a27c28592d969e4016c86c3ea upstream.
A direct jump to a retpoline thunk is really an indirect jump in
disguise. Change the objtool instruction type accordingly.
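For illustration (not part of the patch): with CONFIG_RETPOLINE, a tail
call through a register that used to be emitted as

	jmp	*%rax

now becomes a direct jump to the matching thunk,

	jmp	__x86_indirect_thunk_rax

which objtool has to treat as INSN_JUMP_DYNAMIC again, as the hunk below
does.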
Objtool needs to know where indirect branches are so it can detect
switch statement jump tables.
This fixes a bunch of warnings with CONFIG_RETPOLINE like:
arch/x86/events/intel/uncore_nhmex.o: warning: objtool: nhmex_rbox_msr_enable_event()+0x44: sibling call from callable instruction with modified stack frame
kernel/signal.o: warning: objtool: copy_siginfo_to_user()+0x91: sibling call from callable instruction with modified stack frame
...
Signed-off-by: Josh Poimboeuf <jpoimboe(a)redhat.com>
Signed-off-by: David Woodhouse <dwmw(a)amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: gnomes(a)lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: thomas.lendacky(a)amd.com
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Andy Lutomirski <luto(a)amacapital.net>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Kees Cook <keescook(a)google.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-2-git-send-email-dwmw@amazon.co.…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
tools/objtool/check.c | 7 +++++++
1 file changed, 7 insertions(+)
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -456,6 +456,13 @@ static int add_jump_destinations(struct
} else if (rela->sym->sec->idx) {
dest_sec = rela->sym->sec;
dest_off = rela->sym->sym.st_value + rela->addend + 4;
+ } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+ /*
+ * Retpoline jumps are really dynamic jumps in
+ * disguise, so convert them accordingly.
+ */
+ insn->type = INSN_JUMP_DYNAMIC;
+ continue;
} else {
/* sibling call */
insn->jump_dest = 0;
Patches currently in stable-queue which might be from jpoimboe(a)redhat.com are
queue-4.14/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.14/objtool-detect-jumps-to-retpoline-thunks.patch
queue-4.14/objtool-allow-alternatives-to-be-ignored.patch
queue-4.14/x86-retpoline-add-initial-retpoline-support.patch
This is a note to let you know that I've just added the patch titled
objtool: Allow alternatives to be ignored
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
objtool-allow-alternatives-to-be-ignored.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 258c76059cece01bebae098e81bacb1af2edad17 Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
Date: Thu, 11 Jan 2018 21:46:24 +0000
Subject: objtool: Allow alternatives to be ignored
From: Josh Poimboeuf <jpoimboe(a)redhat.com>
commit 258c76059cece01bebae098e81bacb1af2edad17 upstream.
Getting objtool to understand retpolines is going to be a bit of a
challenge. For now, take advantage of the fact that retpolines are
patched in with alternatives. Just read the original (sane)
non-alternative instruction, and ignore the patched-in retpoline.
This allows objtool to understand the control flow *around* the
retpoline, even if it can't yet follow what's inside. This means the
ORC unwinder will fail to unwind from inside a retpoline, but will work
fine otherwise.
Signed-off-by: Josh Poimboeuf <jpoimboe(a)redhat.com>
Signed-off-by: David Woodhouse <dwmw(a)amazon.co.uk>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: gnomes(a)lxorguk.ukuu.org.uk
Cc: Rik van Riel <riel(a)redhat.com>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: thomas.lendacky(a)amd.com
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Andy Lutomirski <luto(a)amacapital.net>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Kees Cook <keescook(a)google.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/1515707194-20531-3-git-send-email-dwmw@amazon.co.…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
tools/objtool/check.c | 62 +++++++++++++++++++++++++++++++++++++++++++++-----
tools/objtool/check.h | 2 -
2 files changed, 57 insertions(+), 7 deletions(-)
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_f
}
/*
+ * FIXME: For now, just ignore any alternatives which add retpolines. This is
+ * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
+ * But it at least allows objtool to understand the control flow *around* the
+ * retpoline.
+ */
+static int add_nospec_ignores(struct objtool_file *file)
+{
+ struct section *sec;
+ struct rela *rela;
+ struct instruction *insn;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(rela, &sec->rela_list, list) {
+ if (rela->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, rela->sym->sec, rela->addend);
+ if (!insn) {
+ WARN("bad .discard.nospec entry");
+ return -1;
+ }
+
+ insn->ignore_alts = true;
+ }
+
+ return 0;
+}
+
+/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
@@ -509,11 +543,18 @@ static int add_call_destinations(struct
dest_off = insn->offset + insn->len + insn->immediate;
insn->call_dest = find_symbol_by_offset(insn->sec,
dest_off);
+ /*
+ * FIXME: Thanks to retpolines, it's now considered
+ * normal for a function to call within itself. So
+ * disable this warning for now.
+ */
+#if 0
if (!insn->call_dest) {
WARN_FUNC("can't find call dest symbol at offset 0x%lx",
insn->sec, insn->offset, dest_off);
return -1;
}
+#endif
} else if (rela->sym->type == STT_SECTION) {
insn->call_dest = find_symbol_by_offset(rela->sym->sec,
rela->addend+4);
@@ -678,12 +719,6 @@ static int add_special_section_alts(stru
return ret;
list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
- alt = malloc(sizeof(*alt));
- if (!alt) {
- WARN("malloc failed");
- ret = -1;
- goto out;
- }
orig_insn = find_insn(file, special_alt->orig_sec,
special_alt->orig_off);
@@ -694,6 +729,10 @@ static int add_special_section_alts(stru
goto out;
}
+ /* Ignore retpoline alternatives. */
+ if (orig_insn->ignore_alts)
+ continue;
+
new_insn = NULL;
if (!special_alt->group || special_alt->new_len) {
new_insn = find_insn(file, special_alt->new_sec,
@@ -719,6 +758,13 @@ static int add_special_section_alts(stru
goto out;
}
+ alt = malloc(sizeof(*alt));
+ if (!alt) {
+ WARN("malloc failed");
+ ret = -1;
+ goto out;
+ }
+
alt->insn = new_insn;
list_add_tail(&alt->list, &orig_insn->alts);
@@ -1035,6 +1081,10 @@ static int decode_sections(struct objtoo
add_ignores(file);
+ ret = add_nospec_ignores(file);
+ if (ret)
+ return ret;
+
ret = add_jump_destinations(file);
if (ret)
return ret;
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -44,7 +44,7 @@ struct instruction {
unsigned int len;
unsigned char type;
unsigned long immediate;
- bool alt_group, visited, dead_end, ignore, hint, save, restore;
+ bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
struct symbol *call_dest;
struct instruction *jump_dest;
struct list_head alts;
Patches currently in stable-queue which might be from jpoimboe(a)redhat.com are
queue-4.14/x86-spectre-add-boot-time-option-to-select-spectre-v2-mitigation.patch
queue-4.14/objtool-detect-jumps-to-retpoline-thunks.patch
queue-4.14/objtool-allow-alternatives-to-be-ignored.patch
queue-4.14/x86-retpoline-add-initial-retpoline-support.patch
This is a note to let you know that I've just added the patch titled
x86/tboot: Unbreak tboot with PTI enabled
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-tboot-unbreak-tboot-with-pti-enabled.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 262b6b30087246abf09d6275eb0c0dc421bcbe38 Mon Sep 17 00:00:00 2001
From: Dave Hansen <dave.hansen(a)linux.intel.com>
Date: Sat, 6 Jan 2018 18:41:14 +0100
Subject: x86/tboot: Unbreak tboot with PTI enabled
From: Dave Hansen <dave.hansen(a)linux.intel.com>
commit 262b6b30087246abf09d6275eb0c0dc421bcbe38 upstream.
This is another case similar to what EFI does: create a new set of
page tables, map some code at a low address, and jump to it. PTI
mistakes this low address for userspace and mistakenly marks it
non-executable in an effort to make it unusable for userspace.
Undo the poison to allow execution.
Fixes: 385ce0ea4c07 ("x86/mm/pti: Add Kconfig")
Signed-off-by: Dave Hansen <dave.hansen(a)linux.intel.com>
Signed-off-by: Andrea Arcangeli <aarcange(a)redhat.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Alan Cox <gnomes(a)lxorguk.ukuu.org.uk>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Jon Masters <jcm(a)redhat.com>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Andi Kleen <andi(a)firstfloor.org>
Cc: Jeff Law <law(a)redhat.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: David Woodhouse <dwmw(a)amazon.co.uk>
Cc: Nick Clifton <nickc(a)redhat.com>
Link: https://lkml.kernel.org/r/20180108102805.GK25546@redhat.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/tboot.c | 1 +
1 file changed, 1 insertion(+)
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -127,6 +127,7 @@ static int map_tboot_page(unsigned long
p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
if (!p4d)
return -1;
+ pgd->pgd &= ~_PAGE_NX;
pud = pud_alloc(&tboot_mm, p4d, vaddr);
if (!pud)
return -1;
Patches currently in stable-queue which might be from dave.hansen(a)linux.intel.com are
queue-4.14/x86-pti-unbreak-efi-old_memmap.patch
queue-4.14/x86-documentation-add-pti-description.patch
queue-4.14/x86-tboot-unbreak-tboot-with-pti-enabled.patch
This is a note to let you know that I've just added the patch titled
x86/mm/pti: Remove dead logic in pti_user_pagetable_walk*()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-mm-pti-remove-dead-logic-in-pti_user_pagetable_walk.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 8d56eff266f3e41a6c39926269c4c3f58f881a8e Mon Sep 17 00:00:00 2001
From: Jike Song <albcamus(a)gmail.com>
Date: Tue, 9 Jan 2018 00:03:41 +0800
Subject: x86/mm/pti: Remove dead logic in pti_user_pagetable_walk*()
From: Jike Song <albcamus(a)gmail.com>
commit 8d56eff266f3e41a6c39926269c4c3f58f881a8e upstream.
The following code contains dead logic:
  162	if (pgd_none(*pgd)) {
  163		unsigned long new_p4d_page = __get_free_page(gfp);
  164		if (!new_p4d_page)
  165			return NULL;
  166
  167		if (pgd_none(*pgd)) {
  168			set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
  169			new_p4d_page = 0;
  170		}
  171		if (new_p4d_page)
  172			free_page(new_p4d_page);
  173	}
There can't be any difference between the two pgd_none(*pgd) checks at L162
and L167, so the check at L171 is always false.
Dave Hansen explained:
Yes, the double-test was part of an optimization where we attempted to
avoid using a global spinlock in the fork() path. We would check for
unallocated mid-level page tables without the lock. The lock was only
taken when we needed to *make* an entry to avoid collisions.
Now that it is all single-threaded, there is no chance of a collision,
no need for a lock, and no need for the re-check.
As all these functions are only called during init, mark them __init as
well.
Fixes: 03f4424f348e ("x86/mm/pti: Add functions to clone kernel PMDs")
Signed-off-by: Jike Song <albcamus(a)gmail.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Alan Cox <gnomes(a)lxorguk.ukuu.org.uk>
Cc: Andi Kleen <ak(a)linux.intel.com>
Cc: Tom Lendacky <thomas.lendacky(a)amd.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Jiri Kosina <jikos(a)kernel.org>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Kees Cook <keescook(a)google.com>
Cc: Andy Lutomirski <luto(a)amacapital.net>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Greg KH <gregkh(a)linux-foundation.org>
Cc: David Woodhouse <dwmw(a)amazon.co.uk>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/20180108160341.3461-1-albcamus@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/mm/pti.c | 32 ++++++--------------------------
1 file changed, 6 insertions(+), 26 deletions(-)
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pg
*
* Returns a pointer to a P4D on success, or NULL on failure.
*/
-static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4
if (!new_p4d_page)
return NULL;
- if (pgd_none(*pgd)) {
- set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
- new_p4d_page = 0;
- }
- if (new_p4d_page)
- free_page(new_p4d_page);
+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
}
BUILD_BUG_ON(pgd_large(*pgd) != 0);
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4
*
* Returns a pointer to a PMD on success, or NULL on failure.
*/
-static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pm
if (!new_pud_page)
return NULL;
- if (p4d_none(*p4d)) {
- set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
- new_pud_page = 0;
- }
- if (new_pud_page)
- free_page(new_pud_page);
+ set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
}
pud = pud_offset(p4d, address);
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pm
if (!new_pmd_page)
return NULL;
- if (pud_none(*pud)) {
- set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
- new_pmd_page = 0;
- }
- if (new_pmd_page)
- free_page(new_pmd_page);
+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
}
return pmd_offset(pud, address);
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_
if (!new_pte_page)
return NULL;
- if (pmd_none(*pmd)) {
- set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
- new_pte_page = 0;
- }
- if (new_pte_page)
- free_page(new_pte_page);
+ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
}
pte = pte_offset_kernel(pmd, address);
Patches currently in stable-queue which might be from albcamus(a)gmail.com are
queue-4.14/x86-mm-pti-remove-dead-logic-in-pti_user_pagetable_walk.patch
This is a note to let you know that I've just added the patch titled
x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001
From: Tom Lendacky <thomas.lendacky(a)amd.com>
Date: Mon, 8 Jan 2018 16:09:32 -0600
Subject: x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC
From: Tom Lendacky <thomas.lendacky(a)amd.com>
commit 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f upstream.
With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference
to MFENCE_RDTSC. However, since the kernel could be running under a
hypervisor that does not support writing that MSR, read the MSR back and
verify that the bit has been set successfully. If the MSR can be read
and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the
MFENCE_RDTSC feature.
Signed-off-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Reviewed-by: Borislav Petkov <bp(a)suse.de>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Dave Hansen <dave.hansen(a)intel.com>
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Dan Williams <dan.j.williams(a)intel.com>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh(a)linux-foundation.org>
Cc: David Woodhouse <dwmw(a)amazon.co.uk>
Cc: Paul Turner <pjt(a)google.com>
Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdo…
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/include/asm/msr-index.h | 1 +
arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++--
2 files changed, 17 insertions(+), 2 deletions(-)
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -354,6 +354,7 @@
#define MSR_FAM10H_NODE_ID 0xc001100c
#define MSR_F10H_DECFG 0xc0011029
#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86
set_cpu_cap(c, X86_FEATURE_K8);
if (cpu_has(c, X86_FEATURE_XMM2)) {
+ unsigned long long val;
+ int ret;
+
/*
* A serializing LFENCE has less overhead than MFENCE, so
* use it for execution serialization. On families which
@@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86
msr_set_bit(MSR_F10H_DECFG,
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
- /* MFENCE stops RDTSC speculation */
- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ /*
+ * Verify that the MSR write was successful (could be running
+ * under a hypervisor) and only then assume that LFENCE is
+ * serializing.
+ */
+ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+ /* A serializing LFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+ } else {
+ /* MFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ }
}
/*
Patches currently in stable-queue which might be from thomas.lendacky(a)amd.com are
queue-4.14/kvm-vmx-scrub-hardware-gprs-at-vm-exit.patch
queue-4.14/x86-mm-pti-remove-dead-logic-in-pti_user_pagetable_walk.patch
queue-4.14/x86-cpu-amd-make-lfence-a-serializing-instruction.patch
queue-4.14/x86-alternatives-fix-optimize_nops-checking.patch
queue-4.14/x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch