Hi Greg,
Please queue up these powerpc patches for 4.14 if you have no objections.
cheers
Christophe Leroy (1):
  powerpc/fsl: Fix the flush of branch predictor.

Diana Craciun (16):
  powerpc/64: Disable the speculation barrier from the command line
  powerpc/64: Make stf barrier PPC_BOOK3S_64 specific.
  powerpc/64: Make meltdown reporting Book3S 64 specific
  powerpc/fsl: Add barrier_nospec implementation for NXP PowerPC Book3E
  powerpc/fsl: Sanitize the syscall table for NXP PowerPC 32 bit platforms
  powerpc/fsl: Add infrastructure to fixup branch predictor flush
  powerpc/fsl: Add macro to flush the branch predictor
  powerpc/fsl: Fix spectre_v2 mitigations reporting
  powerpc/fsl: Emulate SPRN_BUCSR register
  powerpc/fsl: Add nospectre_v2 command line argument
  powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)
  powerpc/fsl: Flush the branch predictor at each kernel entry (32 bit)
  powerpc/fsl: Flush branch predictor when entering KVM
  powerpc/fsl: Enable runtime patching if nospectre_v2 boot arg is used
  powerpc/fsl: Update Spectre v2 reporting
  powerpc/fsl: Fixed warning: orphan section `__btb_flush_fixup'

Michael Ellerman (11):
  powerpc: Use barrier_nospec in copy_from_user()
  powerpc/64: Use barrier_nospec in syscall entry
  powerpc/64s: Show ori31 availability in spectre_v1 sysfs file not v2
  powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC
  powerpc/64: Call setup_barrier_nospec() from setup_arch()
  powerpc/asm: Add a patch_site macro & helpers for patching instructions
  powerpc/64s: Add new security feature flags for count cache flush
  powerpc/64s: Add support for software count cache flush
  powerpc/pseries: Query hypervisor for count cache flush settings
  powerpc/powernv: Query firmware for count cache flush settings
  powerpc/security: Fix spectre_v2 reporting

Michal Suchanek (4):
  powerpc/64s: Add support for ori barrier_nospec patching
  powerpc/64s: Patch barrier_nospec in modules
  powerpc/64s: Enable barrier_nospec based on firmware settings
  powerpc/64s: Enhance the information in cpu_show_spectre_v1()
 arch/powerpc/Kconfig                         |   7 +-
 arch/powerpc/include/asm/asm-prototypes.h    |   6 +
 arch/powerpc/include/asm/barrier.h           |  12 +-
 arch/powerpc/include/asm/code-patching-asm.h |  18 ++
 arch/powerpc/include/asm/code-patching.h     |   2 +
 arch/powerpc/include/asm/feature-fixups.h    |  21 ++
 arch/powerpc/include/asm/hvcall.h            |   2 +
 arch/powerpc/include/asm/ppc_asm.h           |  10 +
 arch/powerpc/include/asm/security_features.h |   7 +
 arch/powerpc/include/asm/setup.h             |  20 ++
 arch/powerpc/include/asm/uaccess.h           |  11 +-
 arch/powerpc/kernel/Makefile                 |   3 +-
 arch/powerpc/kernel/entry_32.S               |  10 +
 arch/powerpc/kernel/entry_64.S               |  69 ++++++
 arch/powerpc/kernel/exceptions-64e.S         |  27 ++-
 arch/powerpc/kernel/head_booke.h             |  12 ++
 arch/powerpc/kernel/head_fsl_booke.S         |  15 ++
 arch/powerpc/kernel/module.c                 |  10 +-
 arch/powerpc/kernel/security.c               | 215 ++++++++++++++++++-
 arch/powerpc/kernel/setup-common.c           |   3 +
 arch/powerpc/kernel/vmlinux.lds.S            |  19 +-
 arch/powerpc/kvm/bookehv_interrupts.S        |   4 +
 arch/powerpc/kvm/e500_emulate.c              |   7 +
 arch/powerpc/lib/code-patching.c             |  16 ++
 arch/powerpc/lib/feature-fixups.c            |  93 ++++++++
 arch/powerpc/mm/tlb_low_64e.S                |   7 +
 arch/powerpc/platforms/powernv/setup.c       |   7 +
 arch/powerpc/platforms/pseries/setup.c       |   7 +
 28 files changed, 622 insertions(+), 18 deletions(-)
 create mode 100644 arch/powerpc/include/asm/code-patching-asm.h
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fe418226df7f..de3b07c7be30 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -164,7 +164,7 @@ config PPC
 	select GENERIC_CLOCKEVENTS_BROADCAST	if SMP
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_CPU_AUTOPROBE
-	select GENERIC_CPU_VULNERABILITIES	if PPC_BOOK3S_64
+	select GENERIC_CPU_VULNERABILITIES	if PPC_BARRIER_NOSPEC
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IRQ_SHOW_LEVEL
 	select GENERIC_SMP_IDLE_THREAD
@@ -236,6 +236,11 @@ config PPC
 	# Please keep this list sorted alphabetically.
 	#
 
+config PPC_BARRIER_NOSPEC
+	bool
+	default y
+	depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
 config GENERIC_CSUM
 	def_bool n
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7330150bfe34..ba4c75062d49 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -126,4 +126,10 @@ extern int __ucmpdi2(u64, u64);
 void _mcount(void);
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
 
+/* Patch sites */
+extern s32 patch__call_flush_count_cache;
+extern s32 patch__flush_count_cache_return;
+
+extern long flush_count_cache;
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index e582d2c88092..449474f667c4 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -77,19 +77,25 @@ do {									\
 })
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#define NOSPEC_BARRIER_SLOT   nop
+#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#define NOSPEC_BARRIER_SLOT   nop; nop
+#endif
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
 /*
  * Prevent execution of subsequent instructions until preceding branches have
  * been fully resolved and are no longer executing speculatively.
  */
-#define barrier_nospec_asm ori 31,31,0
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
 
 // This also acts as a compiler barrier due to the memory clobber.
 #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
 
-#else /* !CONFIG_PPC_BOOK3S_64 */
+#else /* !CONFIG_PPC_BARRIER_NOSPEC */
 #define barrier_nospec_asm
 #define barrier_nospec()
-#endif
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
 #include <asm-generic/barrier.h>
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
new file mode 100644
index 000000000000..ed7b1448493a
--- /dev/null
+++ b/arch/powerpc/include/asm/code-patching-asm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corporation.
+ */
+#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
+#define _ASM_POWERPC_CODE_PATCHING_ASM_H
+
+/* Define a "site" that can be patched */
+.macro patch_site label name
+	.pushsection ".rodata"
+	.balign 4
+	.global \name
+\name:
+	.4byte	\label - .
+	.popsection
+.endm
+
+#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 812535f40124..b2051234ada8 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -32,6 +32,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
 int patch_branch(unsigned int *addr, unsigned long target, int flags);
 int patch_instruction(unsigned int *addr, unsigned int instr);
 int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_instruction_site(s32 *addr, unsigned int instr);
+int patch_branch_site(s32 *site, unsigned long target, int flags);
 
 int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a9b64df34e2a..b1d478acbaec 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -211,6 +211,25 @@ label##3:						\
 	FTR_ENTRY_OFFSET 951b-952b;			\
 	.popsection;
 
+#define NOSPEC_BARRIER_FIXUP_SECTION			\
+953:							\
+	.pushsection __barrier_nospec_fixup,"a";	\
+	.align 2;					\
+954:							\
+	FTR_ENTRY_OFFSET 953b-954b;			\
+	.popsection;
+
+#define START_BTB_FLUSH_SECTION			\
+955:							\
+
+#define END_BTB_FLUSH_SECTION			\
+956:							\
+	.pushsection __btb_flush_fixup,"a";	\
+	.align 2;							\
+957:						\
+	FTR_ENTRY_OFFSET 955b-957b;			\
+	FTR_ENTRY_OFFSET 956b-957b;			\
+	.popsection;
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
@@ -219,6 +238,8 @@ extern long stf_barrier_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
 
 void apply_feature_fixups(void);
 void setup_feature_keys(void);
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 5a740feb7bd7..15cef59092c7 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -340,10 +340,12 @@
 #define H_CPU_CHAR_BRANCH_HINTS_HONORED	(1ull << 58) // IBM bit 5
 #define H_CPU_CHAR_THREAD_RECONFIG_CTRL	(1ull << 57) // IBM bit 6
 #define H_CPU_CHAR_COUNT_CACHE_DISABLED	(1ull << 56) // IBM bit 7
+#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST	(1ull << 54) // IBM bit 9
 
 #define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
 #define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FLUSH_COUNT_CACHE	(1ull << 58) // IBM bit 5
 
 /* Flag values used in H_REGISTER_PROC_TBL hcall */
 #define PROC_TABLE_OP_MASK	0x18
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 36f3e41c9fbe..3e1b8de72776 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -802,4 +802,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
 	stringify_in_c(.long (_target) - . ;)	\
 	stringify_in_c(.previous)
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BTB_FLUSH(reg)			\
+	lis reg,BUCSR_INIT@h;		\
+	ori reg,reg,BUCSR_INIT@l;	\
+	mtspr SPRN_BUCSR,reg;		\
+	isync;
+#else
+#define BTB_FLUSH(reg)
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
 #endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 44989b22383c..759597bf0fd8 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -22,6 +22,7 @@ enum stf_barrier_type {
 
 void setup_stf_barrier(void);
 void do_stf_barrier_fixups(enum stf_barrier_type types);
+void setup_count_cache_flush(void);
 
 static inline void security_ftr_set(unsigned long feature)
 {
@@ -59,6 +60,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Indirect branch prediction cache disabled
 #define SEC_FTR_COUNT_CACHE_DISABLED	0x0000000000000020ull
 
+// bcctr 2,0,0 triggers a hardware assisted count cache flush
+#define SEC_FTR_BCCTR_FLUSH_ASSIST	0x0000000000000800ull
+
 
 // Features indicating need for Spectre/Meltdown mitigations
 
@@ -74,6 +78,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Firmware configuration indicates user favours security over performance
 #define SEC_FTR_FAVOUR_SECURITY		0x0000000000000200ull
 
+// Software required to flush count cache on context switch
+#define SEC_FTR_FLUSH_COUNT_CACHE	0x0000000000000400ull
+
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index a5e919e34c42..5ceab440ecb9 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,6 +52,26 @@ enum l1d_flush_type {
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void setup_barrier_nospec(void);
+#else
+static inline void setup_barrier_nospec(void) { };
+#endif
+void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void setup_spectre_v2(void);
+#else
+static inline void setup_spectre_v2(void) {};
+#endif
+void do_btb_flush_fixups(void);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index cf26e62b268d..bd6d0fb5be9f 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -238,6 +238,7 @@ do {								\
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
@@ -249,8 +250,10 @@ do {								\
 	__long_type(*(ptr)) __gu_val = 0;			\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	might_fault();						\
-	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
+	if (access_ok(VERIFY_READ, __gu_addr, (size))) {	\
+		barrier_nospec();				\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
 })
@@ -261,6 +264,7 @@ do {								\
 	__long_type(*(ptr)) __gu_val;				\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	__chk_user_ptr(ptr);					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
@@ -288,15 +292,19 @@ static inline unsigned long raw_copy_from_user(void *to,
 
 		switch (n) {
 		case 1:
+			barrier_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret);
 			break;
 		case 2:
+			barrier_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret);
 			break;
 		case 4:
+			barrier_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret);
 			break;
 		case 8:
+			barrier_nospec();
 			__get_user_size(*(u64 *)to, from, 8, ret);
 			break;
 		}
@@ -304,6 +312,7 @@ static inline unsigned long raw_copy_from_user(void *to,
 			return 0;
 	}
 
+	barrier_nospec();
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a1089c9a9aa5..142b08d40642 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -45,9 +45,10 @@ obj-$(CONFIG_VDSO32)		+= vdso32/
 obj-$(CONFIG_PPC_WATCHDOG)	+= watchdog.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
-obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o security.o
+obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4ae464b9d490..a2999cd73a82 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,7 @@
 #include <asm/unistd.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
+#include <asm/barrier.h>
 
 /*
  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -358,6 +359,15 @@ _GLOBAL(DoSyscall)
 	ori	r10,r10,sys_call_table@l
 	slwi	r0,r0,2
 	bge-	66f
+
+	barrier_nospec_asm
+	/*
+	 * Prevent the load of the handler below (based on the user-passed
+	 * system call number) being speculatively executed until the test
+	 * against NR_syscalls and branch to .66f above has
+	 * committed.
+	 */
+
 	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
 	mtlr	r10
 	addi	r9,r1,STACK_FRAME_OVERHEAD
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c194f4c8e66b..12395895b9aa 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -25,6 +25,7 @@
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/thread_info.h>
+#include <asm/code-patching-asm.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cputable.h>
@@ -36,6 +37,7 @@
 #include <asm/context_tracking.h>
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
+#include <asm/barrier.h>
 #include <asm/export.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/exception-64s.h>
@@ -76,6 +78,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 	std	r0,GPR0(r1)
 	std	r10,GPR1(r1)
 	beq	2f			/* if from kernel mode */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+	BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
 	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
 2:	std	r2,GPR2(r1)
 	std	r3,GPR3(r1)
@@ -179,6 +186,15 @@ system_call:			/* label this so stack traces look sane */
 	clrldi	r8,r8,32
 15:
 	slwi	r0,r0,4
+
+	barrier_nospec_asm
+	/*
+	 * Prevent the load of the handler below (based on the user-passed
+	 * system call number) being speculatively executed until the test
+	 * against NR_syscalls and branch to .Lsyscall_enosys above has
+	 * committed.
+	 */
+
 	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
 	mtctr	r12
 	bctrl			/* Call handler */
@@ -487,6 +503,57 @@ _GLOBAL(ret_from_kernel_thread)
 	li	r3,0
 	b	.Lsyscall_exit
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+#define FLUSH_COUNT_CACHE	\
+1:	nop;			\
+	patch_site 1b, patch__call_flush_count_cache
+
+
+#define BCCTR_FLUSH	.long 0x4c400420
+
+.macro nops number
+	.rept \number
+	nop
+	.endr
+.endm
+
+.balign 32
+.global flush_count_cache
+flush_count_cache:
+	/* Save LR into r9 */
+	mflr	r9
+
+	.rept 64
+	bl	.+4
+	.endr
+	b	1f
+	nops	6
+
+	.balign 32
+	/* Restore LR */
+1:	mtlr	r9
+	li	r9,0x7fff
+	mtctr	r9
+
+	BCCTR_FLUSH
+
+2:	nop
+	patch_site 2b patch__flush_count_cache_return
+
+	nops	3
+
+	.rept 278
+	.balign 32
+	BCCTR_FLUSH
+	nops	7
+	.endr
+
+	blr
+#else
+#define FLUSH_COUNT_CACHE
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
@@ -518,6 +585,8 @@ _GLOBAL(_switch)
 	std	r23,_CCR(r1)
 	std	r1,KSP(r3)	/* Set old stack pointer */
 
+	FLUSH_COUNT_CACHE
+
 	/*
 	 * On SMP kernels, care must be taken because a task may be
 	 * scheduled off CPUx and on to CPUy. Memory ordering must be
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index acd8ca76233e..2edc1b7b34cc 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -295,7 +295,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
 	beq	1f;			/* branch around if supervisor */   \
 	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
-1:	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
+1:	type##_BTB_FLUSH \
+	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
 	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
 	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
@@ -327,6 +328,30 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 #define SPRN_MC_SRR0	SPRN_MCSRR0
 #define SPRN_MC_SRR1	SPRN_MCSRR1
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define GEN_BTB_FLUSH			\
+	START_BTB_FLUSH_SECTION		\
+		beq 1f;			\
+		BTB_FLUSH(r10)			\
+		1:		\
+	END_BTB_FLUSH_SECTION
+
+#define CRIT_BTB_FLUSH			\
+	START_BTB_FLUSH_SECTION		\
+		BTB_FLUSH(r10)		\
+	END_BTB_FLUSH_SECTION
+
+#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
+#define MC_BTB_FLUSH CRIT_BTB_FLUSH
+#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
+#else
+#define GEN_BTB_FLUSH
+#define CRIT_BTB_FLUSH
+#define DBG_BTB_FLUSH
+#define MC_BTB_FLUSH
+#define GDBELL_BTB_FLUSH
+#endif
+
 #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
 
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index d0862a100d29..306e26c073a0 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -32,6 +32,16 @@
 */
 #define THREAD_NORMSAVE(offset)	(THREAD_NORMSAVES + (offset * 4))
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BOOKE_CLEAR_BTB(reg)						\
+START_BTB_FLUSH_SECTION						\
+	BTB_FLUSH(reg)							\
+END_BTB_FLUSH_SECTION
+#else
+#define BOOKE_CLEAR_BTB(reg)
+#endif
+
+
 #define NORMAL_EXCEPTION_PROLOG(intno)					     \
 	mtspr	SPRN_SPRG_WSCRATCH0, r10;	/* save one register */	     \
 	mfspr	r10, SPRN_SPRG_THREAD;					     \
@@ -43,6 +53,7 @@
 	andi.	r11, r11, MSR_PR;	/* check whether user or kernel    */\
 	mr	r11, r1;						     \
 	beq	1f;							     \
+	BOOKE_CLEAR_BTB(r11)						\
 	/* if from user, start at top of this thread's kernel stack */      \
 	lwz	r11, THREAD_INFO-THREAD(r10);				     \
 	ALLOC_STACK_FRAME(r11, THREAD_SIZE);				     \
@@ -128,6 +139,7 @@
 	stw	r9,_CCR(r8);		/* save CR on stack		   */\
 	mfspr	r11,exc_level_srr1;	/* check whether user or kernel    */\
 	DO_KVM	BOOKE_INTERRUPT_##intno exc_level_srr1;			     \
+	BOOKE_CLEAR_BTB(r10)						\
 	andi.	r11,r11,MSR_PR;						     \
 	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
 	lwz	r11,THREAD_INFO-THREAD(r11);	/* this thread's kernel stack */\
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index bf4c6021515f..60a0aeefc4a7 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfcr	r13
 	stw	r13, THREAD_NORMSAVE(3)(r10)
 	DO_KVM	BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
 	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 
 	/* If we are faulting a kernel address, we have to use the
@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	mfcr	r13
 	stw	r13, THREAD_NORMSAVE(3)(r10)
 	DO_KVM	BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
+
 	mfspr	r10, SPRN_SRR0		/* Get faulting address */
 
 	/* If we are faulting a kernel address, we have to use the
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f7ba0f5bf29..77371c9ef3d8 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,7 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
 		do_feature_fixups(powerpc_firmware_features,
 				  (void *)sect->sh_addr,
 				  (void *)sect->sh_addr + sect->sh_size);
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+	sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
+	if (sect != NULL)
+		do_barrier_nospec_fixups_range(barrier_nospec_enabled,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
 	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
 	if (sect != NULL)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index b98a722da915..48b50fb8dc4b 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -9,11 +9,120 @@
 #include <linux/seq_buf.h>
 
 #include <asm/debugfs.h>
+#include <asm/asm-prototypes.h>
+#include <asm/code-patching.h>
 #include <asm/security_features.h>
+#include <asm/setup.h>
 
 
 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 
+enum count_cache_flush_type {
+	COUNT_CACHE_FLUSH_NONE	= 0x1,
+	COUNT_CACHE_FLUSH_SW	= 0x2,
+	COUNT_CACHE_FLUSH_HW	= 0x4,
+};
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+
+bool barrier_nospec_enabled;
+static bool no_nospec;
+static bool btb_flush_enabled;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static bool no_spectrev2;
+#endif
+
+static void enable_barrier_nospec(bool enable)
+{
+	barrier_nospec_enabled = enable;
+	do_barrier_nospec_fixups(enable);
+}
+
+void setup_barrier_nospec(void)
+{
+	bool enable;
+
+	/*
+	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
+	 * But there's a good reason not to. The two flags we check below are
+	 * both are enabled by default in the kernel, so if the hcall is not
+	 * functional they will be enabled.
+	 * On a system where the host firmware has been updated (so the ori
+	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
+	 * not been updated, we would like to enable the barrier. Dropping the
+	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
+	 * we potentially enable the barrier on systems where the host firmware
+	 * is not updated, but that's harmless as it's a no-op.
+	 */
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+	if (!no_nospec)
+		enable_barrier_nospec(enable);
+}
+
+static int __init handle_nospectre_v1(char *p)
+{
+	no_nospec = true;
+
+	return 0;
+}
+early_param("nospectre_v1", handle_nospectre_v1);
+
+#ifdef CONFIG_DEBUG_FS
+static int barrier_nospec_set(void *data, u64 val)
+{
+	switch (val) {
+	case 0:
+	case 1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!!val == !!barrier_nospec_enabled)
+		return 0;
+
+	enable_barrier_nospec(!!val);
+
+	return 0;
+}
+
+static int barrier_nospec_get(void *data, u64 *val)
+{
+	*val = barrier_nospec_enabled ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
+			barrier_nospec_get, barrier_nospec_set, "%llu\n");
+
+static __init int barrier_nospec_debugfs_init(void)
+{
+	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
+			    &fops_barrier_nospec);
+	return 0;
+}
+device_initcall(barrier_nospec_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static int __init handle_nospectre_v2(char *p)
+{
+	no_spectrev2 = true;
+
+	return 0;
+}
+early_param("nospectre_v2", handle_nospectre_v2);
+void setup_spectre_v2(void)
+{
+	if (no_spectrev2)
+		do_btb_flush_fixups();
+	else
+		btb_flush_enabled = true;
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
+#ifdef CONFIG_PPC_BOOK3S_64
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	bool thread_priv;
@@ -46,25 +155,39 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
 
 	return sprintf(buf, "Vulnerable\n");
 }
+#endif
 
 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
-		return sprintf(buf, "Not affected\n");
+	struct seq_buf s;
 
-	return sprintf(buf, "Vulnerable\n");
+	seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+	if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
+		if (barrier_nospec_enabled)
+			seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
+		else
+			seq_buf_printf(&s, "Vulnerable");
+
+		if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
+			seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+
+		seq_buf_printf(&s, "\n");
+	} else
+		seq_buf_printf(&s, "Not affected\n");
+
+	return s.len;
 }
 
 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	bool bcs, ccd, ori;
 	struct seq_buf s;
+	bool bcs, ccd;
 
 	seq_buf_init(&s, buf, PAGE_SIZE - 1);
 
 	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
 	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
-	ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
 
 	if (bcs || ccd) {
 		seq_buf_printf(&s, "Mitigation: ");
@@ -77,17 +200,23 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 
 		if (ccd)
 			seq_buf_printf(&s, "Indirect branch cache disabled");
-	} else
+	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+		seq_buf_printf(&s, "Mitigation: Software count cache flush");
+
+		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+			seq_buf_printf(&s, " (hardware accelerated)");
+	} else if (btb_flush_enabled) {
+		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+	} else {
 		seq_buf_printf(&s, "Vulnerable");
-
-	if (ori)
-		seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+	}
 
 	seq_buf_printf(&s, "\n");
 
 	return s.len;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
 /*
  * Store-forwarding barrier support.
  */
@@ -235,3 +364,71 @@ static __init int stf_barrier_debugfs_init(void)
 }
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
+
+static void toggle_count_cache_flush(bool enable)
+{
+	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+		pr_info("count-cache-flush: software flush disabled.\n");
+		return;
+	}
+
+	patch_branch_site(&patch__call_flush_count_cache,
+			  (u64)&flush_count_cache, BRANCH_SET_LINK);
+
+	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
+		pr_info("count-cache-flush: full software flush sequence enabled.\n");
+		return;
+	}
+
+	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
+	pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+}
+
+void setup_count_cache_flush(void)
+{
+	toggle_count_cache_flush(true);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int count_cache_flush_set(void *data, u64 val)
+{
+	bool enable;
+
+	if (val == 1)
+		enable = true;
+	else if (val == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	toggle_count_cache_flush(enable);
+
+	return 0;
+}
+
+static int count_cache_flush_get(void *data, u64 *val)
+{
+	if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+		*val = 0;
+	else
+		*val = 1;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
+			count_cache_flush_set, "%llu\n");
+
+static __init int count_cache_flush_debugfs_init(void)
+{
+	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
+			    NULL, &fops_count_cache_flush);
+	return 0;
+}
+device_initcall(count_cache_flush_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 008447664643..c58364c74dad 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -937,6 +937,9 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.setup_arch)
 		ppc_md.setup_arch();
 
+	setup_barrier_nospec();
+	setup_spectre_v2();
+
 	paging_init();
 
 	/* Initialize the MMU context management stuff. */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index c89ffb88fa3b..b0cf4af7ba84 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,8 +153,25 @@ SECTIONS
 		*(__rfi_flush_fixup)
 		__stop___rfi_flush_fixup = .;
 	}
-#endif
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+	. = ALIGN(8);
+	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
+		__start___barrier_nospec_fixup = .;
+		*(__barrier_nospec_fixup)
+		__stop___barrier_nospec_fixup = .;
+	}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+	. = ALIGN(8);
+	__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
+		__start__btb_flush_fixup = .;
+		*(__btb_flush_fixup)
+		__stop__btb_flush_fixup = .;
+	}
+#endif
 	EXCEPTION_TABLE(0)
 
 	NOTES :kernel :notes
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 81bd8a07aa51..612b7f6a887f 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -75,6 +75,10 @@
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
+START_BTB_FLUSH_SECTION
+	BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 990db69a1d0b..fa88f641ac03 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
 		vcpu->arch.pwrmgtcr0 = spr_val;
 		break;
 
+	case SPRN_BUCSR:
+		/*
+		 * If we are here, it means that we have already flushed the
+		 * branch predictor, so just return to guest.
+		 */
+		break;
+
 	/* extra exceptions */
 #ifdef CONFIG_SPE_POSSIBLE
 	case SPRN_IVOR32:
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 130405158afa..c5154817178b 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -206,6 +206,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
 	return patch_instruction(addr, create_branch(addr, target, flags));
 }
 
+int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, create_branch(addr, target, flags));
+}
+
+int patch_instruction_site(s32 *site, unsigned int instr)
+{
+	unsigned int *addr;
+
+	addr = (unsigned int *)((unsigned long)site + *site);
+	return patch_instruction(addr, instr);
+}
+
 bool is_offset_in_branch_range(long offset)
 {
 	/*
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e1bcdc32a851..de7861e09b41 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -277,8 +277,101 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
 		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
 						: "unknown");
 }
+
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+	unsigned int instr, *dest;
+	long *start, *end;
+	int i;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	instr = 0x60000000; /* nop */
+
+	if (enable) {
+		pr_info("barrier-nospec: using ORI speculation barrier\n");
+		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+	}
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+		patch_instruction(dest, instr);
+	}
+
+	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+void do_barrier_nospec_fixups(bool enable)
+{
+	void *start, *end;
+
+	start = PTRRELOC(&__start___barrier_nospec_fixup),
+	end = PTRRELOC(&__stop___barrier_nospec_fixup);
+
+	do_barrier_nospec_fixups_range(enable, start, end);
+}
+#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+{
+	unsigned int instr[2], *dest;
+	long *start, *end;
+	int i;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	instr[0] = PPC_INST_NOP;
+	instr[1] = PPC_INST_NOP;
+
+	if (enable) {
+		pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
+		instr[0] = PPC_INST_ISYNC;
+		instr[1] = PPC_INST_SYNC;
+	}
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+		patch_instruction(dest, instr[0]);
+		patch_instruction(dest + 1, instr[1]);
+	}
+
+	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
+static void patch_btb_flush_section(long *curr)
+{
+	unsigned int *start, *end;
+
+	start = (void *)curr + *curr;
+	end = (void *)curr + *(curr + 1);
+	for (; start < end; start++) {
+		pr_devel("patching dest %lx\n", (unsigned long)start);
+		patch_instruction(start, PPC_INST_NOP);
+	}
+}
+
+void do_btb_flush_fixups(void)
+{
+	long *start, *end;
+
+	start = PTRRELOC(&__start__btb_flush_fixup);
+	end = PTRRELOC(&__stop__btb_flush_fixup);
+
+	for (; start < end; start += 2)
+		patch_btb_flush_section(start);
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 {
 	long *start, *end;
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index eb82d787d99a..b7e9c09dfe19 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 	std	r15,EX_TLB_R15(r12)
 	std	r10,EX_TLB_CR(r12)
 #ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+	mfspr r11, SPRN_SRR1
+	andi. r10,r11,MSR_PR
+	beq 1f
+	BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
 	std	r7,EX_TLB_R7(r12)
 #endif
 	TLB_MISS_PROLOG_STATS
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index fd143c934768..888aa9584e94 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np)
 	if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
 		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
 
+	if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
+		security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+	if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
+		security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
 	/*
 	 * The features below are enabled by default, so we instead look to see
 	 * if firmware has *disabled* them, and clear them if so.
@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void)
 		       security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
 
 	setup_rfi_flush(type, enable);
+	setup_count_cache_flush();
 }
 
 static void __init pnv_setup_arch(void)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 45f814041448..6a0ad56e89b9 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -484,6 +484,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
 	if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
 		security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
 
+	if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
+		security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+
+	if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
+		security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+
 	/*
 	 * The features below are enabled by default, so we instead look to see
 	 * if firmware has *disabled* them, and clear them if so.
@@ -534,6 +540,7 @@ void pseries_setup_rfi_flush(void)
 		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
 
 	setup_rfi_flush(types, enable);
+	setup_count_cache_flush();
 }
 
 static void __init pSeries_setup_arch(void)
-- 
2.20.1
From: Michal Suchanek <msuchanek@suse.de>
commit 2eea7f067f495e33b8b116b35b5988ab2b8aec55 upstream.
Based on the RFI patching. This is required to be able to disable the speculation barrier.
Only one barrier type is supported and it does nothing when the firmware does not enable it. Also, re-patching modules is not supported, so the only meaningful thing that can be done is patching out the speculation barrier at boot when the user says it is not wanted.
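To make the mechanism easier to review: each barrier site records a 32-bit self-relative offset into the __barrier_nospec_fixup section, and at boot we walk that table and rewrite each site. Here is a minimal user-space sketch of the same idea (the array names and the table setup in main() are illustrative only, not the kernel API):

#include <stdint.h>
#include <stdio.h>

#define ORI_31_31_0 0x63ff0000u /* ori 31,31,0: the speculation barrier */
#define PPC_NOP     0x60000000u /* patched in when the barrier is disabled */

/* Two pretend barrier sites, plus a fixup table holding, for each site,
 * the offset from the table entry to the site (what FTR_ENTRY_OFFSET
 * records in the kernel). */
static uint32_t site[2] = { ORI_31_31_0, ORI_31_31_0 };
static int32_t fixup[2];

static void do_fixups(int enable)
{
	uint32_t instr = enable ? ORI_31_31_0 : PPC_NOP;
	int i;

	for (i = 0; i < 2; i++) {
		/* dest = (void *)start + *start, as in do_barrier_nospec_fixups() */
		uint32_t *dest = (uint32_t *)((char *)&fixup[i] + fixup[i]);
		*dest = instr; /* the kernel uses patch_instruction() here */
	}
	printf("patched %d locations\n", i);
}

int main(void)
{
	int i;

	/* Normally emitted at build time by the asm macro; done by hand here. */
	for (i = 0; i < 2; i++)
		fixup[i] = (int32_t)((char *)&site[i] - (char *)&fixup[i]);

	do_fixups(0); /* user asked for no barrier: the sites become nops */
	return 0;
}

In the kernel the offsets are emitted by NOSPEC_BARRIER_FIXUP_SECTION and the rewrite goes through patch_instruction(), as in the diff below.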
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/barrier.h        |  2 +-
 arch/powerpc/include/asm/feature-fixups.h |  9 ++++++++
 arch/powerpc/include/asm/setup.h          |  1 +
 arch/powerpc/kernel/security.c            |  9 ++++++++
 arch/powerpc/kernel/vmlinux.lds.S         |  7 ++++++
 arch/powerpc/lib/feature-fixups.c         | 27 +++++++++++++++++++++++
 6 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index e582d2c88092..f67b3f6e36be 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -81,7 +81,7 @@ do {									\
  * Prevent execution of subsequent instructions until preceding branches have
  * been fully resolved and are no longer executing speculatively.
  */
-#define barrier_nospec_asm ori 31,31,0
+#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
 
 // This also acts as a compiler barrier due to the memory clobber.
 #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a9b64df34e2a..fcfd05672b1b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -211,6 +211,14 @@ label##3:						\
 	FTR_ENTRY_OFFSET 951b-952b;			\
 	.popsection;
 
+#define NOSPEC_BARRIER_FIXUP_SECTION			\
+953:							\
+	.pushsection __barrier_nospec_fixup,"a";	\
+	.align 2;					\
+954:							\
+	FTR_ENTRY_OFFSET 953b-954b;			\
+	.popsection;
+
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
@@ -219,6 +227,7 @@ extern long stf_barrier_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 
 void apply_feature_fixups(void);
 void setup_feature_keys(void);
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index a5e919e34c42..88018e442386 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,6 +52,7 @@ enum l1d_flush_type {
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+void do_barrier_nospec_fixups(bool enable);
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index b98a722da915..4f18a447ab70 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -10,10 +10,19 @@
 
 #include <asm/debugfs.h>
 #include <asm/security_features.h>
+#include <asm/setup.h>
 
 
 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 
+static bool barrier_nospec_enabled;
+
+static void enable_barrier_nospec(bool enable)
+{
+	barrier_nospec_enabled = enable;
+	do_barrier_nospec_fixups(enable);
+}
+
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	bool thread_priv;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index c89ffb88fa3b..43960d69bec9 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -153,6 +153,13 @@ SECTIONS
 		*(__rfi_flush_fixup)
 		__stop___rfi_flush_fixup = .;
 	}
+
+	. = ALIGN(8);
+	__spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
+		__start___barrier_nospec_fixup = .;
+		*(__barrier_nospec_fixup)
+		__stop___barrier_nospec_fixup = .;
+	}
 #endif
 
 	EXCEPTION_TABLE(0)
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e1bcdc32a851..65b4e8276bdd 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -277,6 +277,33 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
 		(types &  L1D_FLUSH_MTTRIG)     ? "mttrig type"
 						: "unknown");
 }
+
+void do_barrier_nospec_fixups(bool enable)
+{
+	unsigned int instr, *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___barrier_nospec_fixup),
+	end = PTRRELOC(&__stop___barrier_nospec_fixup);
+
+	instr = 0x60000000; /* nop */
+
+	if (enable) {
+		pr_info("barrier-nospec: using ORI speculation barrier\n");
+		instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+	}
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+		patch_instruction(dest, instr);
+	}
+
+	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+}
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
From: Michal Suchanek <msuchanek@suse.de>
commit 815069ca57c142eb71d27439bc27f41a433a67b3 upstream.
Note that, unlike RFI, which is patched only in the kernel, the nospec state reflects the settings at the time the module was loaded.
Iterating all modules and re-patching every time the settings change is not implemented.
Based on lwsync patching.
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/setup.h  |  7 +++++++
 arch/powerpc/kernel/module.c      |  6 ++++++
 arch/powerpc/kernel/security.c    |  2 +-
 arch/powerpc/lib/feature-fixups.c | 16 +++++++++++++---
 4 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 88018e442386..8f3e5f6de0dd 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -53,6 +53,13 @@ enum l1d_flush_type {
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
 void do_barrier_nospec_fixups(bool enable);
+extern bool barrier_nospec_enabled;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+#else
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+#endif
 
 #endif /* !__ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f7ba0f5bf29..1b3c6835e730 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -72,6 +72,12 @@ int module_finalize(const Elf_Ehdr *hdr,
 		do_feature_fixups(powerpc_firmware_features,
 				  (void *)sect->sh_addr,
 				  (void *)sect->sh_addr + sect->sh_size);
+
+	sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
+	if (sect != NULL)
+		do_barrier_nospec_fixups_range(barrier_nospec_enabled,
+				  (void *)sect->sh_addr,
+				  (void *)sect->sh_addr + sect->sh_size);
 #endif
 
 	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 4f18a447ab70..4eb9d2b252e3 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -15,7 +15,7 @@
 
 unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
 
-static bool barrier_nospec_enabled;
+bool barrier_nospec_enabled;
 
 static void enable_barrier_nospec(bool enable)
 {
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 65b4e8276bdd..d78421174ab6 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -278,14 +278,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
 						: "unknown");
 }
 
-void do_barrier_nospec_fixups(bool enable)
+void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
 {
 	unsigned int instr, *dest;
 	long *start, *end;
 	int i;
 
-	start = PTRRELOC(&__start___barrier_nospec_fixup),
-	end = PTRRELOC(&__stop___barrier_nospec_fixup);
+	start = fixup_start;
+	end = fixup_end;
 
 	instr = 0x60000000; /* nop */
 
@@ -304,6 +304,16 @@ void do_barrier_nospec_fixups(bool enable)
 	printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
 }
 
+void do_barrier_nospec_fixups(bool enable)
+{
+	void *start, *end;
+
+	start = PTRRELOC(&__start___barrier_nospec_fixup),
+	end = PTRRELOC(&__stop___barrier_nospec_fixup);
+
+	do_barrier_nospec_fixups_range(enable, start, end);
+}
+
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
From: Michal Suchanek <msuchanek@suse.de>
commit cb3d6759a93c6d0aea1c10deb6d00e111c29c19c upstream.
Check what firmware told us and enable/disable the barrier_nospec as appropriate.
We err on the side of enabling the barrier, as it's a no-op on older systems; see the comment for more detail.
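With debugfs mounted at the usual /sys/kernel/debug, the knob added below can also be used to flip the barrier at runtime while testing; an illustrative session:

  # cat /sys/kernel/debug/powerpc/barrier_nospec
  1
  # echo 0 > /sys/kernel/debug/powerpc/barrier_nospec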
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/setup.h       |  1 +
 arch/powerpc/kernel/security.c         | 59 ++++++++++++++++++++++++++
 arch/powerpc/platforms/powernv/setup.c |  1 +
 arch/powerpc/platforms/pseries/setup.c |  1 +
 4 files changed, 62 insertions(+)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 8f3e5f6de0dd..cd436d208b40 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -52,6 +52,7 @@ enum l1d_flush_type {
 
 void setup_rfi_flush(enum l1d_flush_type, bool enable);
 void do_rfi_flush_fixups(enum l1d_flush_type types);
+void setup_barrier_nospec(void);
 void do_barrier_nospec_fixups(bool enable);
 extern bool barrier_nospec_enabled;
 
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 4eb9d2b252e3..7553951b500a 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -23,6 +23,65 @@ static void enable_barrier_nospec(bool enable)
 	do_barrier_nospec_fixups(enable);
 }
 
+void setup_barrier_nospec(void)
+{
+	bool enable;
+
+	/*
+	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
+	 * But there's a good reason not to. The two flags we check below are
+	 * both are enabled by default in the kernel, so if the hcall is not
+	 * functional they will be enabled.
+	 * On a system where the host firmware has been updated (so the ori
+	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
+	 * not been updated, we would like to enable the barrier. Dropping the
+	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
+	 * we potentially enable the barrier on systems where the host firmware
+	 * is not updated, but that's harmless as it's a no-op.
+	 */
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+	enable_barrier_nospec(enable);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int barrier_nospec_set(void *data, u64 val)
+{
+	switch (val) {
+	case 0:
+	case 1:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!!val == !!barrier_nospec_enabled)
+		return 0;
+
+	enable_barrier_nospec(!!val);
+
+	return 0;
+}
+
+static int barrier_nospec_get(void *data, u64 *val)
+{
+	*val = barrier_nospec_enabled ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
+			barrier_nospec_get, barrier_nospec_set, "%llu\n");
+
+static __init int barrier_nospec_debugfs_init(void)
+{
+	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
+			    &fops_barrier_nospec);
+	return 0;
+}
+device_initcall(barrier_nospec_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	bool thread_priv;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index fd143c934768..e6f8505a3818 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -123,6 +123,7 @@ static void pnv_setup_rfi_flush(void)
 		       security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
 
 	setup_rfi_flush(type, enable);
+	setup_barrier_nospec();
 }
 
 static void __init pnv_setup_arch(void)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 45f814041448..ac12ec4d839d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -534,6 +534,7 @@ void pseries_setup_rfi_flush(void)
 		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
 
 	setup_rfi_flush(types, enable);
+	setup_barrier_nospec();
 }
 
 static void __init pSeries_setup_arch(void)
commit ddf35cf3764b5a182b178105f57515b42e2634f8 upstream.
Based on the x86 commit doing the same.
See commit 304ec1b05031 ("x86/uaccess: Use __uaccess_begin_nospec() and uaccess_try_nospec") and b3bbfb3fb5d2 ("x86: Introduce __uaccess_begin_nospec() and uaccess_try_nospec") for more detail.
In all cases we are ordering the load from the potentially user-controlled pointer vs a previous branch based on an access_ok() check or similar.
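As a concrete illustration of the pattern being closed off (a sketch, not code from this series; it assumes the kernel's barrier_nospec() from asm/barrier.h), the barrier sits between the bounds check and the dependent load:

#define NR_ENTRIES 64
static long table[NR_ENTRIES];

long lookup(unsigned long user_idx)
{
	if (user_idx >= NR_ENTRIES)	/* the access_ok()-style check */
		return -22;		/* i.e. -EINVAL */

	barrier_nospec();	/* no dependent load until the branch above
				 * has actually resolved */
	return table[user_idx];	/* load via a user-controlled index */
}

Without the barrier the CPU may speculatively perform an out-of-bounds load while the check is still only predicted, and the loaded value can then be observed through the cache.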
Based on a patch from Michal Suchanek.
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/include/asm/uaccess.h | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index cf26e62b268d..bd6d0fb5be9f 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -238,6 +238,7 @@ do {								\
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
 		might_fault();					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__typeof__(*(ptr)))__gu_val;			\
 	__gu_err;						\
@@ -249,8 +250,10 @@ do {								\
 	__long_type(*(ptr)) __gu_val = 0;			\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	might_fault();						\
-	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
+	if (access_ok(VERIFY_READ, __gu_addr, (size))) {	\
+		barrier_nospec();				\
 		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	}							\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
 })
@@ -261,6 +264,7 @@ do {								\
 	__long_type(*(ptr)) __gu_val;				\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	__chk_user_ptr(ptr);					\
+	barrier_nospec();					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
 	__gu_err;						\
@@ -288,15 +292,19 @@ static inline unsigned long raw_copy_from_user(void *to,
 
 		switch (n) {
 		case 1:
+			barrier_nospec();
 			__get_user_size(*(u8 *)to, from, 1, ret);
 			break;
 		case 2:
+			barrier_nospec();
 			__get_user_size(*(u16 *)to, from, 2, ret);
 			break;
 		case 4:
+			barrier_nospec();
 			__get_user_size(*(u32 *)to, from, 4, ret);
 			break;
 		case 8:
+			barrier_nospec();
 			__get_user_size(*(u64 *)to, from, 8, ret);
 			break;
 		}
@@ -304,6 +312,7 @@ static inline unsigned long raw_copy_from_user(void *to,
 			return 0;
 	}
 
+	barrier_nospec();
 	return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
commit 51973a815c6b46d7b23b68d6af371ad1c9d503ca upstream.
Our syscall entry is done in assembly so patch in an explicit barrier_nospec.
Based on a patch by Michal Suchanek.
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/entry_64.S | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c194f4c8e66b..7a43b27dc6e0 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -36,6 +36,7 @@
 #include <asm/context_tracking.h>
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
+#include <asm/barrier.h>
 #include <asm/export.h>
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/exception-64s.h>
@@ -179,6 +180,15 @@ system_call:			/* label this so stack traces look sane */
 	clrldi	r8,r8,32
 15:
 	slwi	r0,r0,4
+
+	barrier_nospec_asm
+	/*
+	 * Prevent the load of the handler below (based on the user-passed
+	 * system call number) being speculatively executed until the test
+	 * against NR_syscalls and branch to .Lsyscall_enosys above has
+	 * committed.
+	 */
+
 	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
 	mtctr	r12
 	bctrl			/* Call handler */
From: Michal Suchanek <msuchanek@suse.de>
commit a377514519b9a20fa1ea9adddbb4129573129cef upstream.
We now have barrier_nospec as a mitigation, so print it in cpu_show_spectre_v1() when enabled.
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/security.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 7553951b500a..a8b277362931 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -120,6 +120,9 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, c
 	if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
 		return sprintf(buf, "Not affected\n");
 
+	if (barrier_nospec_enabled)
+		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
 	return sprintf(buf, "Vulnerable\n");
 }
commit 6d44acae1937b81cf8115ada8958e04f601f3f2e upstream.
When I added the spectre_v2 information in sysfs, I included the availability of the ori31 speculation barrier.
Although the ori31 barrier can be used to mitigate v2, it's primarily intended as a spectre v1 mitigation. Spectre v2 is mitigated by hardware changes.
So rework the sysfs files to show the ori31 information in the spectre_v1 file, rather than v2.
Currently we display eg:
  $ grep . spectre_v*
  spectre_v1:Mitigation: __user pointer sanitization
  spectre_v2:Mitigation: Indirect branch cache disabled, ori31 speculation barrier enabled
After:
  $ grep . spectre_v*
  spectre_v1:Mitigation: __user pointer sanitization, ori31 speculation barrier enabled
  spectre_v2:Mitigation: Indirect branch cache disabled
Fixes: d6fbe1c55c55 ("powerpc/64s: Wire up cpu_show_spectre_v2()")
Cc: stable@vger.kernel.org # v4.17+
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/security.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index a8b277362931..4cb8f1f7b593 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -117,25 +117,35 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) { - if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) - return sprintf(buf, "Not affected\n"); + struct seq_buf s; + + seq_buf_init(&s, buf, PAGE_SIZE - 1);
- if (barrier_nospec_enabled) - return sprintf(buf, "Mitigation: __user pointer sanitization\n"); + if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { + if (barrier_nospec_enabled) + seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); + else + seq_buf_printf(&s, "Vulnerable");
- return sprintf(buf, "Vulnerable\n"); + if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) + seq_buf_printf(&s, ", ori31 speculation barrier enabled"); + + seq_buf_printf(&s, "\n"); + } else + seq_buf_printf(&s, "Not affected\n"); + + return s.len; }
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) { - bool bcs, ccd, ori; struct seq_buf s; + bool bcs, ccd;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); - ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
if (bcs || ccd) { seq_buf_printf(&s, "Mitigation: "); @@ -151,9 +161,6 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c } else seq_buf_printf(&s, "Vulnerable");
- if (ori) - seq_buf_printf(&s, ", ori31 speculation barrier enabled"); - seq_buf_printf(&s, "\n");
return s.len;
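As an aside, the seq_buf pattern used above is the general recipe for building these sysfs strings piecewise. A minimal sketch with a hypothetical function name, assuming only the API visible in the diff:

#include <linux/mm.h>
#include <linux/seq_buf.h>

/* Hypothetical example: accumulate fragments into the one-page sysfs
 * buffer, then return the number of bytes written. */
static ssize_t example_show(char *buf)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, PAGE_SIZE - 1);	/* keep room for the NUL */
	seq_buf_printf(&s, "Mitigation: example");
	seq_buf_printf(&s, "\n");

	return s.len;
}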
From: Diana Craciun diana.craciun@nxp.com
commit cf175dc315f90185128fb061dc05b6fbb211aa2f upstream.
The speculation barrier can be disabled from the command line with the parameter: "nospectre_v1".
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 4cb8f1f7b593..79f9397998ed 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -16,6 +16,7 @@ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
bool barrier_nospec_enabled; +static bool no_nospec;
static void enable_barrier_nospec(bool enable) { @@ -42,9 +43,18 @@ void setup_barrier_nospec(void) enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
- enable_barrier_nospec(enable); + if (!no_nospec) + enable_barrier_nospec(enable); }
+static int __init handle_nospectre_v1(char *p) +{ + no_nospec = true; + + return 0; +} +early_param("nospectre_v1", handle_nospectre_v1); + #ifdef CONFIG_DEBUG_FS static int barrier_nospec_set(void *data, u64 val) {
From: Diana Craciun diana.craciun@nxp.com
commit 6453b532f2c8856a80381e6b9a1f5ea2f12294df upstream.
NXP Book3E platforms are not vulnerable to speculative store bypass, so make the mitigations PPC_BOOK3S_64 specific.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 79f9397998ed..8ee1ade845c6 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -176,6 +176,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c return s.len; }
+#ifdef CONFIG_PPC_BOOK3S_64 /* * Store-forwarding barrier support. */ @@ -323,3 +324,4 @@ static __init int stf_barrier_debugfs_init(void) } device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_PPC_BOOK3S_64 */
commit 179ab1cbf883575c3a585bcfc0f2160f1d22a149 upstream.
Add a config symbol to encode which platforms support the barrier_nospec speculation barrier. Currently this is just Book3S 64 but we will add Book3E in a future patch.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/Kconfig | 7 ++++++- arch/powerpc/include/asm/barrier.h | 6 +++--- arch/powerpc/include/asm/setup.h | 2 +- arch/powerpc/kernel/Makefile | 3 ++- arch/powerpc/kernel/module.c | 4 +++- arch/powerpc/kernel/vmlinux.lds.S | 4 +++- arch/powerpc/lib/feature-fixups.c | 6 ++++-- 7 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fe418226df7f..d4eb36bb1f7c 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -164,7 +164,7 @@ config PPC select GENERIC_CLOCKEVENTS_BROADCAST if SMP select GENERIC_CMOS_UPDATE select GENERIC_CPU_AUTOPROBE - select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64 + select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW_LEVEL select GENERIC_SMP_IDLE_THREAD @@ -236,6 +236,11 @@ config PPC # Please keep this list sorted alphabetically. #
+config PPC_BARRIER_NOSPEC + bool + default y + depends on PPC_BOOK3S_64 + config GENERIC_CSUM def_bool n
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index f67b3f6e36be..ec43375463ba 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -76,7 +76,7 @@ do { \ ___p1; \ })
-#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_BARRIER_NOSPEC /* * Prevent execution of subsequent instructions until preceding branches have * been fully resolved and are no longer executing speculatively. @@ -86,10 +86,10 @@ do { \ // This also acts as a compiler barrier due to the memory clobber. #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
-#else /* !CONFIG_PPC_BOOK3S_64 */ +#else /* !CONFIG_PPC_BARRIER_NOSPEC */ #define barrier_nospec_asm #define barrier_nospec() -#endif +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
#include <asm-generic/barrier.h>
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index cd436d208b40..1f06bfaac7cc 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -56,7 +56,7 @@ void setup_barrier_nospec(void); void do_barrier_nospec_fixups(bool enable); extern bool barrier_nospec_enabled;
-#ifdef CONFIG_PPC_BOOK3S_64 +#ifdef CONFIG_PPC_BARRIER_NOSPEC void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); #else static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index a1089c9a9aa5..142b08d40642 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -45,9 +45,10 @@ obj-$(CONFIG_VDSO32) += vdso32/ obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o -obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o +obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o +obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o obj-$(CONFIG_PPC_970_NAP) += idle_power4.o diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index 1b3c6835e730..77371c9ef3d8 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -72,13 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr, do_feature_fixups(powerpc_firmware_features, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); +#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC sect = find_section(hdr, sechdrs, "__spec_barrier_fixup"); if (sect != NULL) do_barrier_nospec_fixups_range(barrier_nospec_enabled, (void *)sect->sh_addr, (void *)sect->sh_addr + sect->sh_size); -#endif +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
sect = find_section(hdr, sechdrs, "__lwsync_fixup"); if (sect != NULL) diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 43960d69bec9..7a178dc3f19c 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -153,14 +153,16 @@ SECTIONS *(__rfi_flush_fixup) __stop___rfi_flush_fixup = .; } +#endif /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC_BARRIER_NOSPEC . = ALIGN(8); __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) { __start___barrier_nospec_fixup = .; *(__barrier_nospec_fixup) __stop___barrier_nospec_fixup = .; } -#endif +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
EXCEPTION_TABLE(0)
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index d78421174ab6..4f6acfc87010 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -304,6 +304,9 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); }
+#endif /* CONFIG_PPC_BOOK3S_64 */ + +#ifdef CONFIG_PPC_BARRIER_NOSPEC void do_barrier_nospec_fixups(bool enable) { void *start, *end; @@ -313,8 +316,7 @@ void do_barrier_nospec_fixups(bool enable)
do_barrier_nospec_fixups_range(enable, start, end); } - -#endif /* CONFIG_PPC_BOOK3S_64 */ +#endif /* CONFIG_PPC_BARRIER_NOSPEC */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) {
commit af375eefbfb27cbb5b831984e66d724a40d26b5c upstream.
Currently we require platform code to call setup_barrier_nospec(). But if we add an empty definition for the !CONFIG_PPC_BARRIER_NOSPEC case then we can call it in setup_arch().
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/setup.h | 4 ++++ arch/powerpc/kernel/setup-common.c | 2 ++ arch/powerpc/platforms/powernv/setup.c | 1 - arch/powerpc/platforms/pseries/setup.c | 1 - 4 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 1f06bfaac7cc..102b778c8496 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -52,7 +52,11 @@ enum l1d_flush_type {
void setup_rfi_flush(enum l1d_flush_type, bool enable); void do_rfi_flush_fixups(enum l1d_flush_type types); +#ifdef CONFIG_PPC_BARRIER_NOSPEC void setup_barrier_nospec(void); +#else +static inline void setup_barrier_nospec(void) { }; +#endif void do_barrier_nospec_fixups(bool enable); extern bool barrier_nospec_enabled;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 008447664643..ab7a75b731da 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -937,6 +937,8 @@ void __init setup_arch(char **cmdline_p) if (ppc_md.setup_arch) ppc_md.setup_arch();
+ setup_barrier_nospec(); + paging_init();
/* Initialize the MMU context management stuff. */ diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index e6f8505a3818..fd143c934768 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -123,7 +123,6 @@ static void pnv_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable); - setup_barrier_nospec(); }
static void __init pnv_setup_arch(void) diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ac12ec4d839d..45f814041448 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -534,7 +534,6 @@ void pseries_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable); - setup_barrier_nospec(); }
static void __init pSeries_setup_arch(void)
From: Diana Craciun diana.craciun@nxp.com
commit 406d2b6ae3420f5bb2b3db6986dc6f0b6dbb637b upstream.
In a subsequent patch we will enable building security.c for Book3E. However the NXP platforms are not vulnerable to Meltdown, so make the Meltdown vulnerability reporting PPC_BOOK3S_64 specific.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Split out of larger patch] Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 2 ++ 1 file changed, 2 insertions(+)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 8ee1ade845c6..206488603b66 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -92,6 +92,7 @@ static __init int barrier_nospec_debugfs_init(void) device_initcall(barrier_nospec_debugfs_init); #endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) { bool thread_priv; @@ -124,6 +125,7 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
return sprintf(buf, "Vulnerable\n"); } +#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) {
From: Diana Craciun diana.craciun@nxp.com
commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream.
Implement barrier_nospec as an isync; sync instruction sequence. The implementation uses the infrastructure built for BOOK3S 64.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Split out of larger patch] Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/Kconfig | 2 +- arch/powerpc/include/asm/barrier.h | 8 +++++++- arch/powerpc/lib/feature-fixups.c | 31 ++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index d4eb36bb1f7c..de3b07c7be30 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -239,7 +239,7 @@ config PPC config PPC_BARRIER_NOSPEC bool default y - depends on PPC_BOOK3S_64 + depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
config GENERIC_CSUM def_bool n diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index ec43375463ba..449474f667c4 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -76,12 +76,18 @@ do { \ ___p1; \ })
+#ifdef CONFIG_PPC_BOOK3S_64 +#define NOSPEC_BARRIER_SLOT nop +#elif defined(CONFIG_PPC_FSL_BOOK3E) +#define NOSPEC_BARRIER_SLOT nop; nop +#endif + #ifdef CONFIG_PPC_BARRIER_NOSPEC /* * Prevent execution of subsequent instructions until preceding branches have * been fully resolved and are no longer executing speculatively. */ -#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop +#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
// This also acts as a compiler barrier due to the memory clobber. #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 4f6acfc87010..cac17882ac61 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -318,6 +318,37 @@ void do_barrier_nospec_fixups(bool enable) } #endif /* CONFIG_PPC_BARRIER_NOSPEC */
+#ifdef CONFIG_PPC_FSL_BOOK3E +void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end) +{ + unsigned int instr[2], *dest; + long *start, *end; + int i; + + start = fixup_start; + end = fixup_end; + + instr[0] = PPC_INST_NOP; + instr[1] = PPC_INST_NOP; + + if (enable) { + pr_info("barrier-nospec: using isync; sync as speculation barrier\n"); + instr[0] = PPC_INST_ISYNC; + instr[1] = PPC_INST_SYNC; + } + + for (i = 0; start < end; start++, i++) { + dest = (void *)start + *start; + + pr_devel("patching dest %lx\n", (unsigned long)dest); + patch_instruction(dest, instr[0]); + patch_instruction(dest + 1, instr[1]); + } + + printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); +} +#endif /* CONFIG_PPC_FSL_BOOK3E */ + void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) { long *start, *end;
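For readers wondering where barrier_nospec() actually sits in C code, the canonical pattern it hardens is a bounds check followed by a dependent load. A hypothetical sketch (the function and table are illustrative only, not from the series):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/barrier.h>

static int example_lookup(unsigned long idx)
{
	static const int table[16];

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;

	/* On NXP Book3E this now patches to isync; sync (or to nops when
	 * disabled), so the load below cannot execute speculatively with
	 * an out-of-bounds idx. */
	barrier_nospec();

	return table[idx];
}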
On Fri, 2019-03-29 at 22:26 +1100, Michael Ellerman wrote:
From: Diana Craciun diana.craciun@nxp.com
commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream.
Implement barrier_nospec as an isync; sync instruction sequence. The implementation uses the infrastructure built for BOOK3S 64.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Split out of larger patch] Signed-off-by: Michael Ellerman mpe@ellerman.id.au
What is the performance impact of these Spectre fixes? Can I compile it away?
Jocke
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
On Fri, 2019-03-29 at 22:26 +1100, Michael Ellerman wrote:
From: Diana Craciun diana.craciun@nxp.com
commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream.
Implement barrier_nospec as an isync; sync instruction sequence. The implementation uses the infrastructure built for BOOK3S 64.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Split out of larger patch] Signed-off-by: Michael Ellerman mpe@ellerman.id.au
What is the performance impact of these Spectre fixes?
I've not seen any numbers from anyone.
It will depend on the workload, it's copy to/from user that is most likely to show an impact.
We have a context switch benchmark in tools/testing/selftests/powerpc/benchmarks/context_switch.c.
Running that with "--no-vector --no-altivec --no-fp --test=pipe" shows about a 2.3% slowdown vs booting with "nospectre_v1".
Can I compile it away?
You can't actually, but you can disable it at runtime with "nospectre_v1" on the kernel command line.
We could make it a user selectable compile time option if you really want it to be.
cheers
On Tue, 2019-04-02 at 17:19 +1100, Michael Ellerman wrote:
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
On Fri, 2019-03-29 at 22:26 +1100, Michael Ellerman wrote:
From: Diana Craciun diana.craciun@nxp.com
commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream.
Implement barrier_nospec as an isync; sync instruction sequence. The implementation uses the infrastructure built for BOOK3S 64.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Split out of larger patch] Signed-off-by: Michael Ellerman mpe@ellerman.id.au
What is the performance impact of these Spectre fixes?
I've not seen any numbers from anyone.
Thanks for getting back to me.
It will depend on the workload, it's copy to/from user that is most likely to show an impact.
We have a context switch benchmark in tools/testing/selftests/powerpc/benchmarks/context_switch.c.
Running that with "--no-vector --no-altivec --no-fp --test=pipe" shows about a 2.3% slowdown vs booting with "nospectre_v1".
Can I compile it away?
You can't actually, but you can disable it at runtime with "nospectre_v1" on the kernel command line.
We could make it a user selectable compile time option if you really want it to be.
I think yes. Considering that these patches are fairly untested and their impact in the wild is unknown, requiring systems to change their boot config overnight is too fast.
Jocke
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
On Tue, 2019-04-02 at 17:19 +1100, Michael Ellerman wrote:
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
...
Can I compile it away?
You can't actually, but you can disable it at runtime with "nospectre_v1" on the kernel command line.
We could make it a user selectable compile time option if you really want it to be.
I think yes. Considering that these patches are fairly untested and their impact in the wild is unknown, requiring systems to change their boot config overnight is too fast.
OK. Just to be clear, you're actually using 4.14 on an NXP board and would actually use this option? I don't want to add another option just for a theoretical use case.
cheers
On Wed, 2019-04-03 at 11:53 +1100, Michael Ellerman wrote:
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
On Tue, 2019-04-02 at 17:19 +1100, Michael Ellerman wrote:
Joakim Tjernlund Joakim.Tjernlund@infinera.com writes:
...
Can I compile it away?
You can't actually, but you can disable it at runtime with "nospectre_v1" on the kernel command line.
We could make it a user selectable compile time option if you really want it to be.
I think yes. Considering that these patches are fairly untested and their impact in the wild is unknown, requiring systems to change their boot config overnight is too fast.
OK. Just to be clear, you're actually using 4.14 on an NXP board and would actually use this option? I don't want to add another option just for a theoretical use case.
Correct, we use 4.14 on several custom boards with NXP CPUs and would appreciate being able to control the Spectre mitigations with a build switch.
Thanks a lot! Jocke
From: Diana Craciun diana.craciun@nxp.com
commit c28218d4abbf4f2035495334d8bfcba64bda4787 upstream.
Use barrier_nospec to sanitize the syscall table.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/entry_32.S | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 4ae464b9d490..a2999cd73a82 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -33,6 +33,7 @@ #include <asm/unistd.h> #include <asm/ptrace.h> #include <asm/export.h> +#include <asm/barrier.h>
/* * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE. @@ -358,6 +359,15 @@ _GLOBAL(DoSyscall) ori r10,r10,sys_call_table@l slwi r0,r0,2 bge- 66f + + barrier_nospec_asm + /* + * Prevent the load of the handler below (based on the user-passed + * system call number) being speculatively executed until the test + * against NR_syscalls and branch to .66f above has + * committed. + */ + lwzx r10,r10,r0 /* Fetch system call handler [ptr] */ mtlr r10 addi r9,r1,STACK_FRAME_OVERHEAD
commit 06d0bbc6d0f56dacac3a79900e9a9a0d5972d818 upstream.
Add a macro and some helper C functions for patching single asm instructions.
The gas macro means we can do something like:
1:	nop
	patch_site	1b, patch__foo
Which is less visually distracting than defining a GLOBAL symbol at 1, and also doesn't pollute the symbol table which can confuse eg. perf.
These are obviously similar to our existing feature sections, but are not automatically patched based on CPU/MMU features; rather, they are designed to be manually patched by C code at some arbitrary point.
Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/code-patching-asm.h | 18 ++++++++++++++++++ arch/powerpc/include/asm/code-patching.h | 2 ++ arch/powerpc/lib/code-patching.c | 16 ++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 arch/powerpc/include/asm/code-patching-asm.h
diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h new file mode 100644 index 000000000000..ed7b1448493a --- /dev/null +++ b/arch/powerpc/include/asm/code-patching-asm.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2018, Michael Ellerman, IBM Corporation. + */ +#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H +#define _ASM_POWERPC_CODE_PATCHING_ASM_H + +/* Define a "site" that can be patched */ +.macro patch_site label name + .pushsection ".rodata" + .balign 4 + .global \name +\name: + .4byte \label - . + .popsection +.endm + +#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */ diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h index 812535f40124..b2051234ada8 100644 --- a/arch/powerpc/include/asm/code-patching.h +++ b/arch/powerpc/include/asm/code-patching.h @@ -32,6 +32,8 @@ unsigned int create_cond_branch(const unsigned int *addr, int patch_branch(unsigned int *addr, unsigned long target, int flags); int patch_instruction(unsigned int *addr, unsigned int instr); int raw_patch_instruction(unsigned int *addr, unsigned int instr); +int patch_instruction_site(s32 *addr, unsigned int instr); +int patch_branch_site(s32 *site, unsigned long target, int flags);
int instr_is_relative_branch(unsigned int instr); int instr_is_relative_link_branch(unsigned int instr); diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index 130405158afa..c5154817178b 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -206,6 +206,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags) return patch_instruction(addr, create_branch(addr, target, flags)); }
+int patch_branch_site(s32 *site, unsigned long target, int flags) +{ + unsigned int *addr; + + addr = (unsigned int *)((unsigned long)site + *site); + return patch_instruction(addr, create_branch(addr, target, flags)); +} + +int patch_instruction_site(s32 *site, unsigned int instr) +{ + unsigned int *addr; + + addr = (unsigned int *)((unsigned long)site + *site); + return patch_instruction(addr, instr); +} + bool is_offset_in_branch_range(long offset) { /*
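To make the intended flow concrete, here is a rough sketch of both halves together: an asm-side patch site plus C-side runtime patching. All names here are hypothetical; the pattern mirrors how the count cache flush patch later in this series uses these helpers:

/* asm side (sketch):
 *	1:	nop
 *		patch_site 1b, patch__example
 */

#include <linux/types.h>
#include <asm/code-patching.h>
#include <asm/ppc-opcode.h>

extern s32 patch__example;	/* emitted by the patch_site macro */
extern long example_target;	/* asm routine we may branch to */

static void example_toggle(bool enable)
{
	if (enable)
		/* turn the marked nop into "bl example_target" */
		patch_branch_site(&patch__example,
				  (unsigned long)&example_target,
				  BRANCH_SET_LINK);
	else
		/* restore the plain nop */
		patch_instruction_site(&patch__example, PPC_INST_NOP);
}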
commit dc8c6cce9a26a51fc19961accb978217a3ba8c75 upstream.
Add security feature flags to indicate the need for software to flush the count cache on context switch, and for the presence of a hardware assisted count cache flush.
Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/security_features.h | 6 ++++++ 1 file changed, 6 insertions(+)
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index 44989b22383c..a0d47bc18a5c 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -59,6 +59,9 @@ static inline bool security_ftr_enabled(unsigned long feature) // Indirect branch prediction cache disabled #define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+// bcctr 2,0,0 triggers a hardware assisted count cache flush +#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull +
// Features indicating need for Spectre/Meltdown mitigations
@@ -74,6 +77,9 @@ static inline bool security_ftr_enabled(unsigned long feature) // Firmware configuration indicates user favours security over performance #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+// Software required to flush count cache on context switch +#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull +
// Features enabled by default #define SEC_FTR_DEFAULT \
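The new flags follow the existing set/test pattern. A short, hypothetical sketch of how the later patches in this series consume them:

#include <linux/printk.h>
#include <asm/security_features.h>

static void example_init(void)
{
	/* set by the firmware/hypervisor parsing code when reported */
	security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);

	if (security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
		pr_info("count cache must be flushed on context switch\n");

	if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST))
		pr_info("hardware assisted count cache flush available\n");
}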
commit ee13cb249fabdff8b90aaff61add347749280087 upstream.
Some CPU revisions support a mode where the count cache needs to be flushed by software on context switch. Additionally some revisions may have a hardware accelerated flush, in which case the software flush sequence can be shortened.
If we detect the appropriate flag from firmware we patch a branch into _switch() which takes us to a count cache flush sequence.
That sequence in turn may be patched to return early if we detect that the CPU supports accelerating the flush sequence in hardware.
Add debugfs support for reporting the state of the flush, as well as runtime disabling it.
And modify the spectre_v2 sysfs file to report the state of the software flush.
Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/asm-prototypes.h | 6 ++ arch/powerpc/include/asm/security_features.h | 1 + arch/powerpc/kernel/entry_64.S | 54 +++++++++++ arch/powerpc/kernel/security.c | 98 +++++++++++++++++++- 4 files changed, 154 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 7330150bfe34..ba4c75062d49 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -126,4 +126,10 @@ extern int __ucmpdi2(u64, u64); void _mcount(void); unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+/* Patch sites */ +extern s32 patch__call_flush_count_cache; +extern s32 patch__flush_count_cache_return; + +extern long flush_count_cache; + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h index a0d47bc18a5c..759597bf0fd8 100644 --- a/arch/powerpc/include/asm/security_features.h +++ b/arch/powerpc/include/asm/security_features.h @@ -22,6 +22,7 @@ enum stf_barrier_type {
void setup_stf_barrier(void); void do_stf_barrier_fixups(enum stf_barrier_type types); +void setup_count_cache_flush(void);
static inline void security_ftr_set(unsigned long feature) { diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 7a43b27dc6e0..e40e74e8c635 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -25,6 +25,7 @@ #include <asm/page.h> #include <asm/mmu.h> #include <asm/thread_info.h> +#include <asm/code-patching-asm.h> #include <asm/ppc_asm.h> #include <asm/asm-offsets.h> #include <asm/cputable.h> @@ -497,6 +498,57 @@ _GLOBAL(ret_from_kernel_thread) li r3,0 b .Lsyscall_exit
+#ifdef CONFIG_PPC_BOOK3S_64 + +#define FLUSH_COUNT_CACHE \ +1: nop; \ + patch_site 1b, patch__call_flush_count_cache + + +#define BCCTR_FLUSH .long 0x4c400420 + +.macro nops number + .rept \number + nop + .endr +.endm + +.balign 32 +.global flush_count_cache +flush_count_cache: + /* Save LR into r9 */ + mflr r9 + + .rept 64 + bl .+4 + .endr + b 1f + nops 6 + + .balign 32 + /* Restore LR */ +1: mtlr r9 + li r9,0x7fff + mtctr r9 + + BCCTR_FLUSH + +2: nop + patch_site 2b patch__flush_count_cache_return + + nops 3 + + .rept 278 + .balign 32 + BCCTR_FLUSH + nops 7 + .endr + + blr +#else +#define FLUSH_COUNT_CACHE +#endif /* CONFIG_PPC_BOOK3S_64 */ + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. Then the state @@ -528,6 +580,8 @@ _GLOBAL(_switch) std r23,_CCR(r1) std r1,KSP(r3) /* Set old stack pointer */
+ FLUSH_COUNT_CACHE + /* * On SMP kernels, care must be taken because a task may be * scheduled off CPUx and on to CPUy. Memory ordering must be diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 206488603b66..554d33c7b758 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -9,12 +9,21 @@ #include <linux/seq_buf.h>
#include <asm/debugfs.h> +#include <asm/asm-prototypes.h> +#include <asm/code-patching.h> #include <asm/security_features.h> #include <asm/setup.h>
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+enum count_cache_flush_type { + COUNT_CACHE_FLUSH_NONE = 0x1, + COUNT_CACHE_FLUSH_SW = 0x2, + COUNT_CACHE_FLUSH_HW = 0x4, +}; +static enum count_cache_flush_type count_cache_flush_type; + bool barrier_nospec_enabled; static bool no_nospec;
@@ -159,17 +168,29 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
- if (bcs || ccd) { + if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { + bool comma = false; seq_buf_printf(&s, "Mitigation: ");
- if (bcs) + if (bcs) { seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); + comma = true; + } + + if (ccd) { + if (comma) + seq_buf_printf(&s, ", "); + seq_buf_printf(&s, "Indirect branch cache disabled"); + comma = true; + }
- if (bcs && ccd) + if (comma) seq_buf_printf(&s, ", ");
- if (ccd) - seq_buf_printf(&s, "Indirect branch cache disabled"); + seq_buf_printf(&s, "Software count cache flush"); + + if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) + seq_buf_printf(&s, "(hardware accelerated)"); } else seq_buf_printf(&s, "Vulnerable");
@@ -326,4 +347,71 @@ static __init int stf_barrier_debugfs_init(void) } device_initcall(stf_barrier_debugfs_init); #endif /* CONFIG_DEBUG_FS */ + +static void toggle_count_cache_flush(bool enable) +{ + if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { + patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP); + count_cache_flush_type = COUNT_CACHE_FLUSH_NONE; + pr_info("count-cache-flush: software flush disabled.\n"); + return; + } + + patch_branch_site(&patch__call_flush_count_cache, + (u64)&flush_count_cache, BRANCH_SET_LINK); + + if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { + count_cache_flush_type = COUNT_CACHE_FLUSH_SW; + pr_info("count-cache-flush: full software flush sequence enabled.\n"); + return; + } + + patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR); + count_cache_flush_type = COUNT_CACHE_FLUSH_HW; + pr_info("count-cache-flush: hardware assisted flush sequence enabled\n"); +} + +void setup_count_cache_flush(void) +{ + toggle_count_cache_flush(true); +} + +#ifdef CONFIG_DEBUG_FS +static int count_cache_flush_set(void *data, u64 val) +{ + bool enable; + + if (val == 1) + enable = true; + else if (val == 0) + enable = false; + else + return -EINVAL; + + toggle_count_cache_flush(enable); + + return 0; +} + +static int count_cache_flush_get(void *data, u64 *val) +{ + if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE) + *val = 0; + else + *val = 1; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, + count_cache_flush_set, "%llu\n"); + +static __init int count_cache_flush_debugfs_init(void) +{ + debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root, + NULL, &fops_count_cache_flush); + return 0; +} +device_initcall(count_cache_flush_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ #endif /* CONFIG_PPC_BOOK3S_64 */
commit ba72dc171954b782a79d25e0f4b3ed91090c3b1e upstream.
Use the existing hypercall to determine the appropriate settings for the count cache flush, and then call the generic powerpc code to set it up based on the security feature flags.
Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/hvcall.h | 2 ++ arch/powerpc/platforms/pseries/setup.c | 7 +++++++ 2 files changed, 9 insertions(+)
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 5a740feb7bd7..15cef59092c7 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h @@ -340,10 +340,12 @@ #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5 #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6 #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7 +#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 +#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
/* Flag values used in H_REGISTER_PROC_TBL hcall */ #define PROC_TABLE_OP_MASK 0x18 diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 45f814041448..6a0ad56e89b9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -484,6 +484,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result) if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED) security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) + security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); + + if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE) + security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); + /* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. @@ -534,6 +540,7 @@ void pseries_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable); + setup_count_cache_flush(); }
static void __init pSeries_setup_arch(void)
commit 99d54754d3d5f896a8f616b0b6520662bc99d66b upstream.
Look for fw-features properties to determine the appropriate settings for the count cache flush, and then call the generic powerpc code to set it up based on the security feature flags.
Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/platforms/powernv/setup.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index fd143c934768..888aa9584e94 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np) if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np)) + security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); + + if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np)) + security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); + /* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. @@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void) security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable); + setup_count_cache_flush(); }
static void __init pnv_setup_arch(void)
From: Diana Craciun diana.craciun@nxp.com
commit 76a5eaa38b15dda92cd6964248c39b5a6f3a4e9d upstream.
In order to protect against speculation attacks (Spectre variant 2) on NXP PowerPC platforms, the branch predictor should be flushed when the privilege level changes. This patch adds the infrastructure to fix up at runtime the code sections that perform the branch predictor flush, depending on a boot parameter added later in a separate patch.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/feature-fixups.h | 12 ++++++++++++ arch/powerpc/include/asm/setup.h | 2 ++ arch/powerpc/kernel/vmlinux.lds.S | 8 ++++++++ arch/powerpc/lib/feature-fixups.c | 23 +++++++++++++++++++++++ 4 files changed, 45 insertions(+)
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index fcfd05672b1b..b1d478acbaec 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h @@ -219,6 +219,17 @@ label##3: \ FTR_ENTRY_OFFSET 953b-954b; \ .popsection;
+#define START_BTB_FLUSH_SECTION \ +955: \ + +#define END_BTB_FLUSH_SECTION \ +956: \ + .pushsection __btb_flush_fixup,"a"; \ + .align 2; \ +957: \ + FTR_ENTRY_OFFSET 955b-957b; \ + FTR_ENTRY_OFFSET 956b-957b; \ + .popsection;
#ifndef __ASSEMBLY__ #include <linux/types.h> @@ -228,6 +239,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; +extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
void apply_feature_fixups(void); void setup_feature_keys(void); diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 102b778c8496..59072bc50fbc 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -66,6 +66,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; #endif
+void do_btb_flush_fixups(void); + #endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 7a178dc3f19c..b0cf4af7ba84 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -164,6 +164,14 @@ SECTIONS } #endif /* CONFIG_PPC_BARRIER_NOSPEC */
+#ifdef CONFIG_PPC_FSL_BOOK3E + . = ALIGN(8); + __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) { + __start__btb_flush_fixup = .; + *(__btb_flush_fixup) + __stop__btb_flush_fixup = .; + } +#endif EXCEPTION_TABLE(0)
NOTES :kernel :notes diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index cac17882ac61..de7861e09b41 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i); } + +static void patch_btb_flush_section(long *curr) +{ + unsigned int *start, *end; + + start = (void *)curr + *curr; + end = (void *)curr + *(curr + 1); + for (; start < end; start++) { + pr_devel("patching dest %lx\n", (unsigned long)start); + patch_instruction(start, PPC_INST_NOP); + } +} + +void do_btb_flush_fixups(void) +{ + long *start, *end; + + start = PTRRELOC(&__start__btb_flush_fixup); + end = PTRRELOC(&__stop__btb_flush_fixup); + + for (; start < end; start += 2) + patch_btb_flush_section(start); +} #endif /* CONFIG_PPC_FSL_BOOK3E */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
From: Diana Craciun diana.craciun@nxp.com
commit 1cbf8990d79ff69da8ad09e8a3df014e1494462b upstream.
The BUCSR register can be used to invalidate the entries in the branch prediction mechanisms.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/ppc_asm.h | 10 ++++++++++ 1 file changed, 10 insertions(+)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 36f3e41c9fbe..3e1b8de72776 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -802,4 +802,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) stringify_in_c(.long (_target) - . ;) \ stringify_in_c(.previous)
+#ifdef CONFIG_PPC_FSL_BOOK3E +#define BTB_FLUSH(reg) \ + lis reg,BUCSR_INIT@h; \ + ori reg,reg,BUCSR_INIT@l; \ + mtspr SPRN_BUCSR,reg; \ + isync; +#else +#define BTB_FLUSH(reg) +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #endif /* _ASM_POWERPC_PPC_ASM_H */
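For reference, a rough C rendering of what BTB_FLUSH(reg) does. This is a sketch only, assuming the mtspr()/isync() helpers from asm/reg.h and asm/synch.h and the BUCSR_INIT value used by the macro above:

#include <asm/reg.h>
#include <asm/reg_booke.h>
#include <asm/synch.h>

/* Write BUCSR_INIT to the Branch Unit Control and Status Register and
 * isync; on e500 cores this invalidates the branch predictor entries. */
static inline void btb_flush_sketch(void)
{
	mtspr(SPRN_BUCSR, BUCSR_INIT);
	isync();
}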
From: Diana Craciun diana.craciun@nxp.com
commit 7d8bad99ba5a22892f0cad6881289fdc3875a930 upstream.
Currently for CONFIG_PPC_FSL_BOOK3E the spectre_v2 file is incorrect:
$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
"Mitigation: Software count cache flush"
Which is wrong. Fix it to report vulnerable for now.
Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush") Cc: stable@vger.kernel.org # v4.19+ Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 554d33c7b758..f9bdd37ddc8c 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -22,7 +22,7 @@ enum count_cache_flush_type { COUNT_CACHE_FLUSH_SW = 0x2, COUNT_CACHE_FLUSH_HW = 0x4, }; -static enum count_cache_flush_type count_cache_flush_type; +static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
bool barrier_nospec_enabled; static bool no_nospec;
From: Diana Craciun diana.craciun@nxp.com
commit 98518c4d8728656db349f875fcbbc7c126d4c973 upstream.
In order to flush the branch predictor, the guest kernel performs writes to the BUCSR register, which is hypervisor privileged. However, the branch predictor is already flushed at each KVM entry, so just return to the guest as soon as possible.
Signed-off-by: Diana Craciun diana.craciun@nxp.com [mpe: Tweak comment formatting] Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kvm/e500_emulate.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 990db69a1d0b..fa88f641ac03 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c @@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va vcpu->arch.pwrmgtcr0 = spr_val; break;
+ case SPRN_BUCSR: + /* + * If we are here, it means that we have already flushed the + * branch predictor, so just return to guest. + */ + break; + /* extra exceptions */ #ifdef CONFIG_SPE_POSSIBLE case SPRN_IVOR32:
From: Diana Craciun diana.craciun@nxp.com
commit f633a8ad636efb5d4bba1a047d4a0f1ef719aa06 upstream.
When the command line argument is present, the Spectre variant 2 mitigations are disabled.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/include/asm/setup.h | 5 +++++ arch/powerpc/kernel/security.c | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+)
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index 59072bc50fbc..5ceab440ecb9 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -66,6 +66,11 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end); static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }; #endif
+#ifdef CONFIG_PPC_FSL_BOOK3E +void setup_spectre_v2(void); +#else +static inline void setup_spectre_v2(void) {}; +#endif void do_btb_flush_fixups(void);
#endif /* !__ASSEMBLY__ */ diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index f9bdd37ddc8c..7a611a187b53 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -26,6 +26,10 @@ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NO
bool barrier_nospec_enabled; static bool no_nospec; +static bool btb_flush_enabled; +#ifdef CONFIG_PPC_FSL_BOOK3E +static bool no_spectrev2; +#endif
static void enable_barrier_nospec(bool enable) { @@ -101,6 +105,23 @@ static __init int barrier_nospec_debugfs_init(void) device_initcall(barrier_nospec_debugfs_init); #endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_PPC_FSL_BOOK3E +static int __init handle_nospectre_v2(char *p) +{ + no_spectrev2 = true; + + return 0; +} +early_param("nospectre_v2", handle_nospectre_v2); +void setup_spectre_v2(void) +{ + if (no_spectrev2) + do_btb_flush_fixups(); + else + btb_flush_enabled = true; +} +#endif /* CONFIG_PPC_FSL_BOOK3E */ + #ifdef CONFIG_PPC_BOOK3S_64 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) {
From: Diana Craciun diana.craciun@nxp.com
commit 10c5e83afd4a3f01712d97d3bb1ae34d5b74a185 upstream.
In order to protect against speculation attacks on indirect branches, the branch predictor is flushed at kernel entry to protect against the following situations:
- a userspace process attacking another userspace process
- a userspace process attacking the kernel
Basically, when the privilege level changes (i.e. the kernel is entered), the branch predictor state is flushed.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/entry_64.S | 5 +++++ arch/powerpc/kernel/exceptions-64e.S | 26 +++++++++++++++++++++++++- arch/powerpc/mm/tlb_low_64e.S | 7 +++++++ 3 files changed, 37 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index e40e74e8c635..12395895b9aa 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -78,6 +78,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM) std r0,GPR0(r1) std r10,GPR1(r1) beq 2f /* if from kernel mode */ +#ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION +#endif ACCOUNT_CPU_USER_ENTRY(r13, r10, r11) 2: std r2,GPR2(r1) std r3,GPR3(r1) diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index acd8ca76233e..2acd18a903e9 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -295,7 +295,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) andi. r10,r11,MSR_PR; /* save stack pointer */ \ beq 1f; /* branch around if supervisor */ \ ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\ -1: cmpdi cr1,r1,0; /* check if SP makes sense */ \ +1: type##_BTB_FLUSH \ + cmpdi cr1,r1,0; /* check if SP makes sense */ \ bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \ mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
@@ -327,6 +328,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #define SPRN_MC_SRR0 SPRN_MCSRR0 #define SPRN_MC_SRR1 SPRN_MCSRR1
+#ifdef CONFIG_PPC_FSL_BOOK3E +#define GEN_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + beq 1f; \ + BTB_FLUSH(r10) \ + 1: \ + END_BTB_FLUSH_SECTION + +#define CRIT_BTB_FLUSH \ + START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r10) \ + END_BTB_FLUSH_SECTION + +#define DBG_BTB_FLUSH CRIT_BTB_FLUSH +#define MC_BTB_FLUSH CRIT_BTB_FLUSH +#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH +#else +#define GEN_BTB_FLUSH +#define CRIT_BTB_FLUSH +#define DBG_BTB_FLUSH +#define GDBELL_BTB_FLUSH +#endif + #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index eb82d787d99a..b7e9c09dfe19 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S @@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) std r15,EX_TLB_R15(r12) std r10,EX_TLB_CR(r12) #ifdef CONFIG_PPC_FSL_BOOK3E +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION std r7,EX_TLB_R7(r12) #endif TLB_MISS_PROLOG_STATS
From: Diana Craciun diana.craciun@nxp.com
commit 7fef436295bf6c05effe682c8797dfcb0deb112a upstream.
In order to protect against speculation attacks on indirect branches, the branch predictor is flushed at kernel entry to protect against the following situations:
- a userspace process attacking another userspace process
- a userspace process attacking the kernel
Basically, when the privilege level changes (i.e. the kernel is entered), the branch predictor state is flushed.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/head_booke.h | 6 ++++++ arch/powerpc/kernel/head_fsl_booke.S | 15 +++++++++++++++ 2 files changed, 21 insertions(+)
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index d0862a100d29..15ac51072eb3 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -43,6 +43,9 @@ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ mr r11, r1; \ beq 1f; \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r11) \ +END_BTB_FLUSH_SECTION \ /* if from user, start at top of this thread's kernel stack */ \ lwz r11, THREAD_INFO-THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ @@ -128,6 +131,9 @@ stw r9,_CCR(r8); /* save CR on stack */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(r10) \ +END_BTB_FLUSH_SECTION \ andi. r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index bf4c6021515f..60a0aeefc4a7 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the @@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) mfcr r13 stw r13, THREAD_NORMSAVE(3)(r10) DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 +START_BTB_FLUSH_SECTION + mfspr r11, SPRN_SRR1 + andi. r10,r11,MSR_PR + beq 1f + BTB_FLUSH(r10) +1: +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
From: Diana Craciun diana.craciun@nxp.com
commit e7aa61f47b23afbec41031bc47ca8d6cb6516abc upstream.
Switching from guest to host is another place where speculative accesses can be exploited. Flush the branch predictor when entering KVM.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kvm/bookehv_interrupts.S | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index 81bd8a07aa51..612b7f6a887f 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -75,6 +75,10 @@ PPC_LL r1, VCPU_HOST_STACK(r4) PPC_LL r2, HOST_R2(r1)
+START_BTB_FLUSH_SECTION + BTB_FLUSH(r10) +END_BTB_FLUSH_SECTION + mfspr r10, SPRN_PID lwz r8, VCPU_HOST_PID(r4) PPC_LL r11, VCPU_SHARED(r4)
From: Diana Craciun diana.craciun@nxp.com
commit 3bc8ea8603ae4c1e09aca8de229ad38b8091fcb3 upstream.
If the user chooses not to use the mitigations, replace the code sequence with nops.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/setup-common.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index ab7a75b731da..c58364c74dad 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -938,6 +938,7 @@ void __init setup_arch(char **cmdline_p) ppc_md.setup_arch();
setup_barrier_nospec(); + setup_spectre_v2();
paging_init();
From: Diana Craciun diana.craciun@nxp.com
commit dfa88658fb0583abb92e062c7a9cd5a5b94f2a46 upstream.
Report branch predictor state flush as a mitigation for Spectre variant 2.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/security.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c index 7a611a187b53..720a7a912d0d 100644 --- a/arch/powerpc/kernel/security.c +++ b/arch/powerpc/kernel/security.c @@ -212,8 +212,11 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) seq_buf_printf(&s, "(hardware accelerated)"); - } else + } else if (btb_flush_enabled) { + seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); + } else { seq_buf_printf(&s, "Vulnerable"); + }
seq_buf_printf(&s, "\n");
From: Diana Craciun diana.craciun@nxp.com
commit 039daac5526932ec731e4499613018d263af8b3e upstream.
Fix the following build warning:

powerpc-linux-gnu-ld: warning: orphan section `__btb_flush_fixup' from `arch/powerpc/kernel/head_44x.o' being placed in section `__btb_flush_fixup'.
Signed-off-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/head_booke.h | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 15ac51072eb3..306e26c073a0 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -32,6 +32,16 @@ */ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
+#ifdef CONFIG_PPC_FSL_BOOK3E +#define BOOKE_CLEAR_BTB(reg) \ +START_BTB_FLUSH_SECTION \ + BTB_FLUSH(reg) \ +END_BTB_FLUSH_SECTION +#else +#define BOOKE_CLEAR_BTB(reg) +#endif + + #define NORMAL_EXCEPTION_PROLOG(intno) \ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ mfspr r10, SPRN_SPRG_THREAD; \ @@ -43,9 +53,7 @@ andi. r11, r11, MSR_PR; /* check whether user or kernel */\ mr r11, r1; \ beq 1f; \ -START_BTB_FLUSH_SECTION \ - BTB_FLUSH(r11) \ -END_BTB_FLUSH_SECTION \ + BOOKE_CLEAR_BTB(r11) \ /* if from user, start at top of this thread's kernel stack */ \ lwz r11, THREAD_INFO-THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ @@ -131,9 +139,7 @@ END_BTB_FLUSH_SECTION \ stw r9,_CCR(r8); /* save CR on stack */\ mfspr r11,exc_level_srr1; /* check whether user or kernel */\ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ -START_BTB_FLUSH_SECTION \ - BTB_FLUSH(r10) \ -END_BTB_FLUSH_SECTION \ + BOOKE_CLEAR_BTB(r10) \ andi. r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
From: Christophe Leroy christophe.leroy@c-s.fr
commit 27da80719ef132cf8c80eb406d5aeb37dddf78cc upstream.
The commit identified below adds the MC_BTB_FLUSH macro only when CONFIG_PPC_FSL_BOOK3E is defined. This results in the following error on some configs (seen several times with kisskb randconfig_defconfig):
arch/powerpc/kernel/exceptions-64e.S:576: Error: Unrecognized opcode: `mc_btb_flush'
make[3]: *** [scripts/Makefile.build:367: arch/powerpc/kernel/exceptions-64e.o] Error 1
make[2]: *** [scripts/Makefile.build:492: arch/powerpc/kernel] Error 2
make[1]: *** [Makefile:1043: arch/powerpc] Error 2
make: *** [Makefile:152: sub-make] Error 2
This patch adds a blank definition of MC_BTB_FLUSH for other cases.
Fixes: 10c5e83afd4a ("powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)") Cc: Diana Craciun diana.craciun@nxp.com Signed-off-by: Christophe Leroy christophe.leroy@c-s.fr Reviewed-by: Daniel Axtens dja@axtens.net Reviewed-by: Diana Craciun diana.craciun@nxp.com Signed-off-by: Michael Ellerman mpe@ellerman.id.au --- arch/powerpc/kernel/exceptions-64e.S | 1 + 1 file changed, 1 insertion(+)
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 2acd18a903e9..2edc1b7b34cc 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -348,6 +348,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) #define GEN_BTB_FLUSH #define CRIT_BTB_FLUSH #define DBG_BTB_FLUSH +#define MC_BTB_FLUSH #define GDBELL_BTB_FLUSH #endif
commit 92edf8df0ff2ae86cc632eeca0e651fd8431d40d upstream.
When I updated the spectre_v2 reporting to handle software count cache flush I got the logic wrong when there's no software count cache enabled at all.
The result is that on systems with the software count cache flush disabled we print:
Mitigation: Indirect branch cache disabled, Software count cache flush
Which correctly indicates that the count cache is disabled, but incorrectly says the software count cache flush is enabled.
The root of the problem is that we are trying to handle all combinations of options. But we know now that we only expect to see the software count cache flush enabled if the other options are false.
So split the two cases, which simplifies the logic and fixes the bug. We were also missing a space before "(hardware accelerated)".
The result is we see one of:
  Mitigation: Indirect branch serialisation (kernel only)
  Mitigation: Indirect branch cache disabled
  Mitigation: Software count cache flush
  Mitigation: Software count cache flush (hardware accelerated)
Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush")
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Michael Neuling <mikey@neuling.org>
Reviewed-by: Diana Craciun <diana.craciun@nxp.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
---
 arch/powerpc/kernel/security.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 720a7a912d0d..48b50fb8dc4b 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -189,29 +189,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 	bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
 	ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
 
-	if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
-		bool comma = false;
+	if (bcs || ccd) {
 		seq_buf_printf(&s, "Mitigation: ");
 
-		if (bcs) {
+		if (bcs)
 			seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
-			comma = true;
-		}
 
-		if (ccd) {
-			if (comma)
-				seq_buf_printf(&s, ", ");
-			seq_buf_printf(&s, "Indirect branch cache disabled");
-			comma = true;
-		}
-
-		if (comma)
+		if (bcs && ccd)
 			seq_buf_printf(&s, ", ");
 
-		seq_buf_printf(&s, "Software count cache flush");
+		if (ccd)
+			seq_buf_printf(&s, "Indirect branch cache disabled");
+	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+		seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
 		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
-			seq_buf_printf(&s, "(hardware accelerated)");
+			seq_buf_printf(&s, " (hardware accelerated)");
 	} else if (btb_flush_enabled) {
 		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
 	} else {
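[Editor's note: for anyone who wants to sanity-check the new control flow without building a kernel, below is a small stand-alone C sketch of the same logic. It is an illustration only: printf stands in for seq_buf_printf, and the enum, function name, and plain "Vulnerable" fallback are simplified stand-ins, not the kernel's API.]

#include <stdbool.h>
#include <stdio.h>

enum cc_flush { FLUSH_NONE, FLUSH_SW, FLUSH_HW };

/* Mirrors the patched logic: bcs/ccd are reported in one branch, and
 * the software count cache flush is only reported when both are false. */
static void show_spectre_v2(bool bcs, bool ccd, enum cc_flush type,
			    bool btb_flush_enabled)
{
	if (bcs || ccd) {
		printf("Mitigation: ");
		if (bcs)
			printf("Indirect branch serialisation (kernel only)");
		if (bcs && ccd)
			printf(", ");
		if (ccd)
			printf("Indirect branch cache disabled");
		printf("\n");
	} else if (type != FLUSH_NONE) {
		printf("Mitigation: Software count cache flush%s\n",
		       type == FLUSH_HW ? " (hardware accelerated)" : "");
	} else if (btb_flush_enabled) {
		printf("Mitigation: Branch predictor state flush\n");
	} else {
		printf("Vulnerable\n");	/* simplified fallback */
	}
}

int main(void)
{
	/* The case the patch fixes: no bcs/ccd, HW-assisted flush. */
	show_spectre_v2(false, false, FLUSH_HW, false);
	return 0;
}

[With the pre-patch logic, ccd=true would have produced the misleading combined string quoted in the commit message; here it prints exactly one of the four lines listed above.]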
On Fri, Mar 29, 2019 at 10:25:48PM +1100, Michael Ellerman wrote:
> -----BEGIN PGP SIGNED MESSAGE-----
> Hash: SHA1
>
> Hi Greg,
>
> Please queue up these powerpc patches for 4.14 if you have no objections.

Some of these also need to go to 4.19, right? Want me to add them
there, or are you going to provide a backported series?

thanks,

greg k-h
On Fri, Mar 29, 2019 at 03:51:16PM +0100, Greg KH wrote:
> On Fri, Mar 29, 2019 at 10:25:48PM +1100, Michael Ellerman wrote:
>> -----BEGIN PGP SIGNED MESSAGE-----
>> Hash: SHA1
>>
>> Hi Greg,
>>
>> Please queue up these powerpc patches for 4.14 if you have no objections.
>
> Some of these also need to go to 4.19, right? Want me to add them
> there, or are you going to provide a backported series?

Nevermind, I've queued up the missing ones to 4.19.y, and one missing
one to 5.0.y. If I've missed anything, please let me know.

thanks,

greg k-h
Greg KH <gregkh@linuxfoundation.org> writes:
> On Fri, Mar 29, 2019 at 03:51:16PM +0100, Greg KH wrote:
>> On Fri, Mar 29, 2019 at 10:25:48PM +1100, Michael Ellerman wrote:
>>> -----BEGIN PGP SIGNED MESSAGE-----
>>> Hash: SHA1
>>>
>>> Hi Greg,
>>>
>>> Please queue up these powerpc patches for 4.14 if you have no objections.
>>
>> Some of these also need to go to 4.19, right? Want me to add them
>> there, or are you going to provide a backported series?

Yes some of them do, but I wasn't sure if they'd go cleanly.

> Nevermind, I've queued up the missing ones to 4.19.y, and one missing
> one to 5.0.y. If I've missed anything, please let me know.

Thanks. I'll check everything's working as expected.

cheers
On 3/31/2019 12:53 PM, Michael Ellerman wrote:
> Greg KH <gregkh@linuxfoundation.org> writes:
>> On Fri, Mar 29, 2019 at 03:51:16PM +0100, Greg KH wrote:
>>> On Fri, Mar 29, 2019 at 10:25:48PM +1100, Michael Ellerman wrote:
>>>> -----BEGIN PGP SIGNED MESSAGE-----
>>>> Hash: SHA1
>>>>
>>>> Hi Greg,
>>>>
>>>> Please queue up these powerpc patches for 4.14 if you have no objections.
>>>
>>> Some of these also need to go to 4.19, right? Want me to add them
>>> there, or are you going to provide a backported series?
>
> Yes some of them do, but I wasn't sure if they'd go cleanly.
>
>> Nevermind, I've queued up the missing ones to 4.19.y, and one missing
>> one to 5.0.y. If I've missed anything, please let me know.
>
> Thanks. I'll check everything's working as expected.

I have validated on NXP PowerPC and it worked as expected on both
kernel 4.14 and kernel 4.19.

Thanks,
Diana
On Tue, Apr 02, 2019 at 03:21:09PM +0000, Diana Madalina Craciun wrote:
> On 3/31/2019 12:53 PM, Michael Ellerman wrote:
>> Greg KH <gregkh@linuxfoundation.org> writes:
>>> On Fri, Mar 29, 2019 at 03:51:16PM +0100, Greg KH wrote:
>>>> On Fri, Mar 29, 2019 at 10:25:48PM +1100, Michael Ellerman wrote:
>>>>> -----BEGIN PGP SIGNED MESSAGE-----
>>>>> Hash: SHA1
>>>>>
>>>>> Hi Greg,
>>>>>
>>>>> Please queue up these powerpc patches for 4.14 if you have no objections.
>>>>
>>>> Some of these also need to go to 4.19, right? Want me to add them
>>>> there, or are you going to provide a backported series?
>>
>> Yes some of them do, but I wasn't sure if they'd go cleanly.
>>
>>> Nevermind, I've queued up the missing ones to 4.19.y, and one missing
>>> one to 5.0.y. If I've missed anything, please let me know.
>>
>> Thanks. I'll check everything's working as expected.
>
> I have validated on NXP PowerPC and it worked as expected on both
> kernel 4.14 and kernel 4.19.

Great, thanks for testing!

greg k-h