The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b1c03f1ef48d36ff28afb06e8f0c1233ef072f1d Mon Sep 17 00:00:00 2001
From: Matt Redfearn <matt.redfearn(a)mips.com>
Date: Wed, 23 May 2018 14:39:58 +0100
Subject: [PATCH] MIPS: memset.S: Fix byte_fixup for MIPSr6
The __clear_user function is defined to return the number of bytes that
could not be cleared. From the underlying memset / bzero implementation
this means setting register a2 to that number on return. Currently, if a
page fault is triggered within the MIPSr6 version of the initial unaligned
bytes handling, the value loaded into a2 on return is meaningless.
During the MIPSr6 version of the initial unaligned bytes block, register
a2 contains the number of bytes to be set beyond the initial unaligned
bytes. The t0 register is initially set to the number of unaligned bytes
- STORSIZE, effectively a negative version of the number of unaligned
bytes. This is then incremented before each byte is saved.
The label .Lbyte_fixup\@ is jumped to on page fault. Currently the value
in a2 is incorrectly replaced by 0 - t0 + 1, effectively the number of
unaligned bytes remaining. This leads to the failures being reported by
the following test code:
static int __init test_clear_user(void)
{
        int j, k;

        pr_info("\n\n\nTesting clear_user\n");
        for (j = 0; j < 512; j++) {
                if ((k = clear_user(NULL+3, j)) != j) {
                        pr_err("clear_user (NULL %d) returned %d\n", j, k);
                }
        }
        return 0;
}
late_initcall(test_clear_user);
Which reports:
[ 3.965439] Testing clear_user
[ 3.973169] clear_user (NULL 8) returned 6
[ 3.976782] clear_user (NULL 9) returned 6
[ 3.980390] clear_user (NULL 10) returned 6
[ 3.984052] clear_user (NULL 11) returned 6
[ 3.987524] clear_user (NULL 12) returned 6
Fix this by subtracting t0 from a2 (rather than $0), effectively giving:
unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
a2 = a2 - t0 + 1
This fixes the value returned from __clear_user when the number of bytes
to set is > LONGSIZE and the address is invalid and unaligned.
Unfortunately, this breaks the fixup handling for unaligned bytes after
the final long, where register a2 still contains the number of bytes
remaining to be set and the t0 register is set to 0 - the number of
unaligned bytes remaining.
Because t0 is now subtracted from a2 rather than from 0, the number of
bytes unset is reported incorrectly:
static int __init test_clear_user(void)
{
        char *test;
        int j, k;

        pr_info("\n\n\nTesting clear_user\n");
        test = vmalloc(PAGE_SIZE);
        for (j = 256; j < 512; j++) {
                if ((k = clear_user(test + PAGE_SIZE - 254, j)) != j - 254) {
                        pr_err("clear_user (%px %d) returned %d\n",
                               test + PAGE_SIZE - 254, j, k);
                }
        }
        return 0;
}
late_initcall(test_clear_user);
[ 3.976775] clear_user (c00000000000df02 256) returned 4
[ 3.981957] clear_user (c00000000000df02 257) returned 6
[ 3.986425] clear_user (c00000000000df02 258) returned 8
[ 3.990850] clear_user (c00000000000df02 259) returned 10
[ 3.995332] clear_user (c00000000000df02 260) returned 12
[ 3.999815] clear_user (c00000000000df02 261) returned 14
Fix this by ensuring that a2 is set to 0 during the set of final
unaligned bytes.
Signed-off-by: Matt Redfearn <matt.redfearn(a)mips.com>
Signed-off-by: Paul Burton <paul.burton(a)mips.com>
Fixes: 8c56208aff77 ("MIPS: lib: memset: Add MIPS R6 support")
Patchwork: https://patchwork.linux-mips.org/patch/19338/
Cc: James Hogan <jhogan(a)kernel.org>
Cc: Ralf Baechle <ralf(a)linux-mips.org>
Cc: linux-mips(a)linux-mips.org
Cc: linux-kernel(a)vger.kernel.org
Cc: stable(a)vger.kernel.org # v4.0+
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..fac26ce64b2f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,7 +232,7 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From b1c03f1ef48d36ff28afb06e8f0c1233ef072f1d Mon Sep 17 00:00:00 2001
From: Matt Redfearn <matt.redfearn(a)mips.com>
Date: Wed, 23 May 2018 14:39:58 +0100
Subject: [PATCH] MIPS: memset.S: Fix byte_fixup for MIPSr6
The __clear_user function is defined to return the number of bytes that
could not be cleared. From the underlying memset / bzero implementation
this means setting register a2 to that number on return. Currently, if a
page fault is triggered within the MIPSr6 version of the initial unaligned
bytes handling, the value loaded into a2 on return is meaningless.
During the MIPSr6 version of the initial unaligned bytes block, register
a2 contains the number of bytes to be set beyond the initial unaligned
bytes. The t0 register is initially set to the number of unaligned bytes
- STORSIZE, effectively a negative version of the number of unaligned
bytes. This is then incremented before each byte is saved.
The label .Lbyte_fixup\@ is jumped to on page fault. Currently the value
in a2 is incorrectly replaced by 0 - t0 + 1, effectively the number of
unaligned bytes remaining. This leads to the failures being reported by
the following test code:
static int __init test_clear_user(void)
{
        int j, k;

        pr_info("\n\n\nTesting clear_user\n");
        for (j = 0; j < 512; j++) {
                if ((k = clear_user(NULL+3, j)) != j) {
                        pr_err("clear_user (NULL %d) returned %d\n", j, k);
                }
        }
        return 0;
}
late_initcall(test_clear_user);
Which reports:
[ 3.965439] Testing clear_user
[ 3.973169] clear_user (NULL 8) returned 6
[ 3.976782] clear_user (NULL 9) returned 6
[ 3.980390] clear_user (NULL 10) returned 6
[ 3.984052] clear_user (NULL 11) returned 6
[ 3.987524] clear_user (NULL 12) returned 6
Fix this by subtracting t0 from a2 (rather than $0), effectively giving:
unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
a2 = a2 - t0 + 1
This fixes the value returned from __clear_user when the number of bytes
to set is > LONGSIZE and the address is invalid and unaligned.
Unfortunately, this breaks the fixup handling for unaligned bytes after
the final long, where register a2 still contains the number of bytes
remaining to be set and the t0 register is set to 0 - the number of
unaligned bytes remaining.
Because t0 is now subtracted from a2 rather than from 0, the number of
bytes unset is reported incorrectly:
static int __init test_clear_user(void)
{
        char *test;
        int j, k;

        pr_info("\n\n\nTesting clear_user\n");
        test = vmalloc(PAGE_SIZE);
        for (j = 256; j < 512; j++) {
                if ((k = clear_user(test + PAGE_SIZE - 254, j)) != j - 254) {
                        pr_err("clear_user (%px %d) returned %d\n",
                               test + PAGE_SIZE - 254, j, k);
                }
        }
        return 0;
}
late_initcall(test_clear_user);
[ 3.976775] clear_user (c00000000000df02 256) returned 4
[ 3.981957] clear_user (c00000000000df02 257) returned 6
[ 3.986425] clear_user (c00000000000df02 258) returned 8
[ 3.990850] clear_user (c00000000000df02 259) returned 10
[ 3.995332] clear_user (c00000000000df02 260) returned 12
[ 3.999815] clear_user (c00000000000df02 261) returned 14
Fix this by ensuring that a2 is set to 0 during the set of final
unaligned bytes.
Signed-off-by: Matt Redfearn <matt.redfearn(a)mips.com>
Signed-off-by: Paul Burton <paul.burton(a)mips.com>
Fixes: 8c56208aff77 ("MIPS: lib: memset: Add MIPS R6 support")
Patchwork: https://patchwork.linux-mips.org/patch/19338/
Cc: James Hogan <jhogan(a)kernel.org>
Cc: Ralf Baechle <ralf(a)linux-mips.org>
Cc: linux-mips(a)linux-mips.org
Cc: linux-kernel(a)vger.kernel.org
Cc: stable(a)vger.kernel.org # v4.0+
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a55..fac26ce64b2f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,7 +232,7 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
The patch below does not apply to the 4.18-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 81365a947de453bcaba2558f8de5dadcffe05bc1 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu <mhiramat(a)kernel.org>
Date: Sat, 28 Apr 2018 21:36:02 +0900
Subject: [PATCH] kprobes: Show address of kprobes if kallsyms does
Show the probed address in the debugfs kprobes list file, the same way
kallsyms does. This information is used to check that kprobes are placed
at the expected addresses, so it should be comparable with the addresses
in kallsyms.
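As a rough illustration of that cross-check, here is a hypothetical userspace
sketch (not part of this patch) that reads both files and compares addresses;
it assumes debugfs is mounted at /sys/kernel/debug, that each list line
follows the format printed by report_probe() in the hunk below, and that the
caller has enough privilege for kallsyms_show_value() to expose real
addresses:
/* Hypothetical cross-check of kprobes/list against /proc/kallsyms. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long long kallsyms_lookup(const char *want)
{
        FILE *f = fopen("/proc/kallsyms", "r");
        unsigned long long addr;
        char type, name[256];

        if (!f)
                return 0;
        while (fscanf(f, "%llx %c %255s%*[^\n]", &addr, &type, name) == 3) {
                if (!strcmp(name, want)) {
                        fclose(f);
                        return addr;
                }
        }
        fclose(f);
        return 0;
}

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/kprobes/list", "r");
        char line[512];

        if (!f) {
                perror("kprobes/list");
                return 1;
        }
        /* Each line: <address> <r|k> <symbol>+0x<offset> [module] */
        while (fgets(line, sizeof(line), f)) {
                unsigned long long kp_addr, off, sym_addr;
                char type[4], sym[256], *plus;

                if (sscanf(line, "%llx %3s %255s", &kp_addr, type, sym) != 3)
                        continue;
                if (!kp_addr)   /* addresses hidden from this caller */
                        continue;
                plus = strchr(sym, '+');
                if (!plus)
                        continue;
                off = strtoull(plus + 1, NULL, 16);
                *plus = '\0';
                sym_addr = kallsyms_lookup(sym);
                if (sym_addr && kp_addr != sym_addr + off)
                        printf("mismatch: %s kprobe=%llx kallsyms=%llx+%llx\n",
                               sym, kp_addr, sym_addr, off);
        }
        fclose(f);
        return 0;
}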
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Cc: Ananth N Mavinakayanahalli <ananth(a)in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy(a)intel.com>
Cc: Arnd Bergmann <arnd(a)arndb.de>
Cc: David Howells <dhowells(a)redhat.com>
Cc: David S . Miller <davem(a)davemloft.net>
Cc: Heiko Carstens <heiko.carstens(a)de.ibm.com>
Cc: Jon Medhurst <tixy(a)linaro.org>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Thomas Richter <tmricht(a)linux.ibm.com>
Cc: Tobin C . Harding <me(a)tobin.cc>
Cc: Will Deacon <will.deacon(a)arm.com>
Cc: acme(a)kernel.org
Cc: akpm(a)linux-foundation.org
Cc: brueckner(a)linux.vnet.ibm.com
Cc: linux-arch(a)vger.kernel.org
Cc: rostedt(a)goodmis.org
Cc: schwidefsky(a)de.ibm.com
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/lkml/152491896256.9916.1583733714492565296.stgit@de…
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab1bfa3d1d9c..1d6130d2937a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2226,19 +2226,23 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
const char *sym, int offset, char *modname, struct kprobe *pp)
{
char *kprobe_type;
+ void *addr = p->addr;
if (p->pre_handler == pre_handler_kretprobe)
kprobe_type = "r";
else
kprobe_type = "k";
+ if (!kallsyms_show_value())
+ addr = NULL;
+
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s ",
- p->addr, kprobe_type, sym, offset,
+ seq_printf(pi, "%px %s %s+0x%x %s ",
+ addr, kprobe_type, sym, offset,
(modname ? modname : " "));
- else
- seq_printf(pi, "%p %s %p ",
- p->addr, kprobe_type, p->addr);
+ else /* try to use %pS */
+ seq_printf(pi, "%px %s %pS ",
+ addr, kprobe_type, p->addr);
if (!pp)
pp = p;
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001
From: Andi Kleen <ak(a)linux.intel.com>
Date: Fri, 24 Aug 2018 10:03:50 -0700
Subject: [PATCH] x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+
On Nehalem and newer core CPUs the CPU cache internally uses 44 bits of
physical address space. The L1TF workaround is limited by this internal
cache address width, and needs to have one bit free there for the
mitigation to work.
Older client systems report only 36bit physical address space so the range
check decides that L1TF is not mitigated for a 36bit phys/32GB system with
some memory holes.
But since these actually have the larger internal cache width this warning
is bogus because it would only really be needed if the system had more than
43bits of memory.
Add a new internal x86_cache_bits field. Normally it is the same as the
physical bits field reported by CPUID, but for Nehalem and newer force it
to be at least 44 bits.
Change the L1TF memory size warning to use the new cache_bits field to
avoid bogus warnings and remove the bogus comment about memory size.
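For reference, the arithmetic behind that limit, worked through in a small
standalone sketch (illustrative only; it simply mirrors the BIT_ULL()
expression from the hunk below with PAGE_SHIFT = 12):
#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors l1tf_pfn_limit(): the boundary is 1 << (bits - 1 - PAGE_SHIFT) PFNs. */
static unsigned long long pfn_limit(unsigned int addr_bits)
{
        return 1ULL << (addr_bits - 1 - PAGE_SHIFT);
}

int main(void)
{
        /* 36 reported physical bits: boundary = 2^23 PFNs, i.e. 32 GB of RAM. */
        printf("36-bit limit: %llu GB\n", (pfn_limit(36) << PAGE_SHIFT) >> 30);
        /* 44-bit internal cache width: boundary = 2^31 PFNs, i.e. 8 TB of RAM. */
        printf("44-bit limit: %llu TB\n", (pfn_limit(44) << PAGE_SHIFT) >> 40);
        return 0;
}
With 36 reported bits the boundary sits at 32GB, which a 32GB machine with
memory holes can reach; with the 44bit internal cache width it moves out to
8TB, so the warning no longer triggers.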
Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
Reported-by: George Anchev <studio(a)anchev.net>
Reported-by: Christopher Snowhill <kode54(a)gmail.com>
Signed-off-by: Andi Kleen <ak(a)linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: x86(a)kernel.org
Cc: linux-kernel(a)vger.kernel.org
Cc: Michael Hocko <mhocko(a)suse.com>
Cc: vbabka(a)suse.cz
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c24297268ebc..d53c54b842da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
+ /* Address space bits used by the cache internally */
+ u8 x86_cache_bits;
unsigned initialized : 1;
} __randomize_layout;
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long long l1tf_pfn_limit(void)
{
- return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}
extern void early_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4c2313d0b9ca..40bdaea97fe7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ if (c->x86_cache_bits < 44)
+ c->x86_cache_bits = 44;
+ break;
+ }
+}
+
static void __init l1tf_select_mitigation(void)
{
u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return;
+ override_cache_bits(&boot_cpu_data);
+
switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
return;
#endif
- /*
- * This is extremely unlikely to happen because almost all
- * systems have far more MAX_PA/2 than RAM can be fit into
- * DIMM slots.
- */
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 84dee5ab745a..44c4ef3d989b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
#endif
+ c->x86_cache_bits = c->x86_phys_bits;
}
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
From: Ulf Hansson <ulf.hansson(a)linaro.org>
In case the PM domain fails to be powered on in genpd_dev_pm_attach(), it
returns -EPROBE_DEFER, but keeps the device attached to its PM domain.
This leads to problems when the attach is later retried. More precisely,
in that situation an -EEXIST error code is returned, because the device
already has its PM domain pointer assigned from the first attempt.
Now, because of the sloppy error handling by the existing callers of
dev_pm_domain_attach(), probing is allowed to continue when -EEXIST is
returned. However, in such a case there are no guarantees that the PM
domain is powered on by genpd, which may lead to hangs when buses/drivers
try to access their devices.
Let's fix this behaviour, simply by detaching the device when powering on
fails in genpd_dev_pm_attach().
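For context, here is a minimal sketch of the caller pattern described above
(hypothetical bus probe code, not taken from this patch;
really_probe_the_device() is a made-up placeholder):
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_domain.h>

/* Placeholder for the rest of the bus's probe work. */
static int really_probe_the_device(struct device *dev)
{
        return 0;
}

static int example_bus_probe(struct device *dev)
{
        int ret;

        /*
         * The "sloppy" handling referred to above: only -EPROBE_DEFER is
         * honoured.  Any other error, including the spurious -EEXIST seen
         * after a failed first attach, is ignored and probing continues
         * against a PM domain that may still be powered off.
         */
        ret = dev_pm_domain_attach(dev, true);
        if (ret == -EPROBE_DEFER)
                return ret;

        return really_probe_the_device(dev);
}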
Cc: v4.11+ <stable(a)vger.kernel.org> # v4.11+
Signed-off-by: Ulf Hansson <ulf.hansson(a)linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
(cherry picked from commit 72038df3c580c4c326b83c86149d7ac34007532a)
Cherry-picked to imx_4.9.y with minimal conflicts so that we can
properly handle errors from SCFW pm calls
Signed-off-by: Leonard Crestez <leonard.crestez(a)nxp.com>
---
drivers/base/power/domain.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9c3e535795a0..d9681d372997 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1936,10 +1936,13 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->sync = genpd_dev_pm_sync;
mutex_lock(&pd->lock);
ret = genpd_poweron(pd, 0);
mutex_unlock(&pd->lock);
+
+ if (ret)
+ genpd_remove_device(pd, dev);
out:
return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
--
2.17.1
The steps taken by usb core to set a new interface are very different from
what is done on the xHC host side.
xHC hardware will do everything in one go. One command is used to set up
new endpoints, free old endpoints, check bandwidth, and run the new
endpoints.
All this is done by xHC when usb core asks the hcd to check for
available bandwidth. At this point usb core has not yet flushed the old
endpoints, which will cause use-after-free issues in xhci driver as
queued URBs are cancelled on a re-allocated endpoint.
To resolve this, add a call to usb_disable_interface(), which will flush
the endpoints before calling usb_hcd_alloc_bandwidth().
Additional checks in the xhci driver will also be implemented to gracefully
handle stale URB cancellations on freed and re-allocated endpoints.
Cc: <stable(a)vger.kernel.org>
Reported-by: Sudip Mukherjee <sudipm.mukherjee(a)gmail.com>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
---
drivers/usb/core/message.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 228672f..304bef2 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1377,6 +1377,13 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
return -EINVAL;
}
+ /*
+ * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+ * including freeing dropped endpoint ring buffers.
+ * Make sure the interface endpoints are flushed before that
+ */
+ usb_disable_interface(dev, iface, false);
+
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
*/
--
2.7.4
The patch below does not apply to the 4.4-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From d814a49198eafa6163698bdd93961302f3a877a4 Mon Sep 17 00:00:00 2001
From: Ethan Lien <ethanlien(a)synology.com>
Date: Mon, 2 Jul 2018 15:44:58 +0800
Subject: [PATCH] btrfs: use correct compare function of dirty_metadata_bytes
We use a customized, nodesize batch value to update dirty_metadata_bytes.
We should also use the batch version of the compare function, or we will
easily go down the fast path and get a false result from
percpu_counter_compare().
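To make that concrete, a minimal sketch of the difference (illustrative only;
it uses the generic percpu_counter API rather than btrfs code, and the names
are made up):
#include <linux/percpu_counter.h>

/*
 * Updates elsewhere use a large custom batch (like btrfs's
 * fs_info->dirty_metadata_batch), so each CPU may hold up to roughly one
 * batch worth of bytes that are not yet folded into the shared count.
 * percpu_counter_compare() sizes its fast path for the default batch only,
 * so it can trust a stale approximate count; passing the real batch makes
 * it fall back to a precise sum when the count is close to the threshold.
 */
static int example_over_threshold(struct percpu_counter *dirty,
                                  s64 thresh, s32 big_batch)
{
        /* Wrong for batched updates: */
        /* return percpu_counter_compare(dirty, thresh) > 0; */

        /* Right: tell the comparison how coarse the updates really are. */
        return __percpu_counter_compare(dirty, thresh, big_batch) > 0;
}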
Fixes: e2d845211eda ("Btrfs: use percpu counter for dirty metadata count")
CC: stable(a)vger.kernel.org # 4.4+
Signed-off-by: Ethan Lien <ethanlien(a)synology.com>
Reviewed-by: Nikolay Borisov <nborisov(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 6023eed3e805..e3858b2fe014 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -959,8 +959,9 @@ static int btree_writepages(struct address_space *mapping,
fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret < 0)
return 0;
}
@@ -4134,8 +4135,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
if (flush_delayed)
btrfs_balance_delayed_items(fs_info);
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret > 0) {
balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping);
}
commit 4c4a39dd5fe2d13e2d2fa5fceb8ef95d19fc389a upstream
If there is a mismatch in the I/D min line size, we must
always use the system wide safe value both in applications
and in the kernel, while performing cache operations. However,
we have been checking more bits than just the min line sizes,
which triggers false negatives. We may need to trap the user
accesses in such cases, but not necessarily patch the kernel.
This patch fixes the check to do the right thing as advertised.
A new capability will be added to check mismatches in other
fields and ensure we trap the CTR accesses.
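To make the new mask concrete, here is a small standalone sketch of what the
relaxed check compares (illustrative only; the CTR_EL0 values are made up,
but the field layout matches the definitions added below: DminLine in bits
[19:16], IminLine in bits [3:0], each holding log2 of the line size in 4-byte
words):
#include <stdint.h>
#include <stdio.h>

#define CTR_DMINLINE_SHIFT      16
#define CTR_IMINLINE_SHIFT      0
#define CTR_CACHE_MINLINE_MASK  \
        (0xfUL << CTR_DMINLINE_SHIFT | 0xfUL << CTR_IMINLINE_SHIFT)

int main(void)
{
        /* Made-up CTR_EL0 readings: same min line sizes, different L1Ip. */
        uint64_t cpu_ctr    = 0x8444c004;
        uint64_t system_ctr = 0x84448004;

        /*
         * Old check: any differing strict field made this CPU look
         * mismatched.  New check: only DminLine/IminLine are compared.
         */
        int mismatched = (cpu_ctr & CTR_CACHE_MINLINE_MASK) !=
                         (system_ctr & CTR_CACHE_MINLINE_MASK);

        printf("mask=%#lx mismatched=%d\n",
               CTR_CACHE_MINLINE_MASK, mismatched);
        printf("D-cache min line: %u bytes\n",
               4u << ((unsigned int)(cpu_ctr >> CTR_DMINLINE_SHIFT) & 0xf));
        return 0;
}
With these example values the two readings differ only outside the min line
size fields, so the relaxed check no longer flags a mismatch, whereas the old
strict_mask-based comparison would have.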
Fixes: be68a8aaf925 ("arm64: cpufeature: Fix CTR_EL0 field definitions")
Cc: <stable(a)vger.kernel.org> # v4.14
Cc: Mark Rutland <mark.rutland(a)arm.com>
Cc: Catalin Marinas <catalin.marinas(a)arm.com>
Reported-by: Will Deacon <will.deacon(a)arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose(a)arm.com>
Signed-off-by: Will Deacon <will.deacon(a)arm.com>
---
arch/arm64/include/asm/cache.h | 5 +++++
arch/arm64/kernel/cpu_errata.c | 6 ++++--
arch/arm64/kernel/cpufeature.c | 4 ++--
3 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index ea9bb4e..e40f8a2 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -20,9 +20,14 @@
#define CTR_L1IP_SHIFT 14
#define CTR_L1IP_MASK 3
+#define CTR_DMINLINE_SHIFT 16
+#define CTR_IMINLINE_SHIFT 0
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
+#define CTR_CACHE_MINLINE_MASK \
+ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
#define ICACHE_POLICY_VPIPT 0
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index eccdb28..3d1569c 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -48,9 +48,11 @@ static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
int scope)
{
+ u64 mask = CTR_CACHE_MINLINE_MASK;
+
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+ return (read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask);
}
static int cpu_enable_trap_ctr_access(void *__unused)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 376cf12..003dd39 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine.
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};
--
2.7.4