Hi,
[This is an automated email]
This commit has been processed by the -stable helper bot and determined
to be a high probability candidate for -stable trees. (score: 20.8863)
The bot has tested the following trees: v4.16, v4.15.15, v4.14.32, v4.9.92, v4.4.126.
v4.16: Build OK!
v4.15.15: Build OK!
v4.14.32: Build OK!
v4.9.92: Failed to apply! Possible dependencies:
94116f8126de ("ACPI: Switch to use generic guid_t in acpi_evaluate_dsm()")
v4.4.126: Failed to apply! Possible dependencies:
94116f8126de ("ACPI: Switch to use generic guid_t in acpi_evaluate_dsm()")
Please let us know if you'd like to have this patch included in a stable tree.
--
Thanks,
Sasha
Enabling virtually mapped kernel stacks breaks the thunderx_zip
driver. On compression or decompression the executing CPU hangs
in an endless loop. The reason for this is the driver's use of
__pa(), which no longer works for an address that is not part of
the 1:1 mapping.
The zip driver allocates a result struct on the stack and needs
to tell the hardware the physical address within this struct
that is used to signal the completion of the request.
As the hardware gets the wrong address after the broken __pa
conversion, it writes to an arbitrary address. The zip driver then
waits forever for the completion byte to contain a non-zero value.
Allocating the result struct from 1:1 mapped memory resolves this
bug.
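For illustration, a minimal sketch of the broken and fixed patterns
(names simplified; this is not code from the patch below):

/*
 * Sketch only: with CONFIG_VMAP_STACK the stack is allocated from
 * vmalloc space, outside the 1:1 linear mapping, so __pa() on a
 * stack address yields a bogus physical address.
 */
struct zip_state on_stack;                      /* vmalloc-backed stack */
u64 bad_pa = __pa(&on_stack);                   /* invalid: not 1:1 mapped */

struct zip_state *heap = kzalloc(sizeof(*heap), GFP_ATOMIC);
if (!heap)
        return -ENOMEM;
u64 good_pa = __pa(heap);                       /* valid: kzalloc() memory
                                                   is in the linear map */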
Signed-off-by: Jan Glauber <jglauber(a)cavium.com>
Reviewed-by: Robert Richter <rrichter(a)cavium.com>
Cc: stable <stable(a)vger.kernel.org> # 4.14
---
drivers/crypto/cavium/zip/zip_crypto.c | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c
index 8df4d26cf9d4..b92b6e7e100f 100644
--- a/drivers/crypto/cavium/zip/zip_crypto.c
+++ b/drivers/crypto/cavium/zip/zip_crypto.c
@@ -124,7 +124,7 @@ int zip_compress(const u8 *src, unsigned int slen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
- struct zip_state zip_state;
+ struct zip_state *zip_state;
struct zip_device *zip = NULL;
int ret;
@@ -135,20 +135,23 @@ int zip_compress(const u8 *src, unsigned int slen,
if (!zip)
return -ENODEV;
- memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+ if (!zip_state)
+ return -ENOMEM;
+
zip_ops = &zip_ctx->zip_comp;
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
memcpy(zip_ops->input, src, slen);
- ret = zip_deflate(zip_ops, &zip_state, zip);
+ ret = zip_deflate(zip_ops, zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
-
+ kfree(zip_state);
return ret;
}
@@ -157,7 +160,7 @@ int zip_decompress(const u8 *src, unsigned int slen,
struct zip_kernel_ctx *zip_ctx)
{
struct zip_operation *zip_ops = NULL;
- struct zip_state zip_state;
+ struct zip_state *zip_state;
struct zip_device *zip = NULL;
int ret;
@@ -168,7 +171,10 @@ int zip_decompress(const u8 *src, unsigned int slen,
if (!zip)
return -ENODEV;
- memset(&zip_state, 0, sizeof(struct zip_state));
+ zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC);
+ if (!zip_state)
+ return -ENOMEM;
+
zip_ops = &zip_ctx->zip_decomp;
memcpy(zip_ops->input, src, slen);
@@ -179,13 +185,13 @@ int zip_decompress(const u8 *src, unsigned int slen,
zip_ops->input_len = slen;
zip_ops->output_len = *dlen;
- ret = zip_inflate(zip_ops, &zip_state, zip);
+ ret = zip_inflate(zip_ops, zip_state, zip);
if (!ret) {
*dlen = zip_ops->output_len;
memcpy(dst, zip_ops->output, *dlen);
}
-
+ kfree(zip_state);
return ret;
}
--
2.16.2
Hi,
[This is an automated email]
This commit has been processed by the -stable helper bot and determined
to be a high probability candidate for -stable trees. (score: 33.5930)
The bot has tested the following trees: v4.16, v4.15.15, v4.14.32, v4.9.92, v4.4.126.
v4.16: Build OK!
v4.15.15: Build OK!
v4.14.32: Build OK!
v4.9.92: Build OK!
v4.4.126: Build OK!
Please let us know if you'd like to have this patch included in a stable tree.
--
Thanks,
Sasha
Hi Parag,
On Sun, 8 Apr 2018 21:21:19 -0400, Parag Warudkar wrote:
> On Sun, Apr 8, 2018 at 8:18 PM, Sasha Levin <Alexander.Levin(a)microsoft.com>
> wrote:
>
> > From: Jean Delvare <jdelvare(a)suse.de>
> >
> > [ Upstream commit a7770ae194569e96a93c48aceb304edded9cc648 ]
> >
> > [snip]
> >
>
>
> > * Strings starting with 8 spaces are all considered empty, even if
> > non-space characters follow (sounds like a weird thing to do, but
> > I have actually seen occurrences of this in DMI tables before.)
> >
>
> Unless I am misreading the patch we will now allocate memory for
> len(spaces)+len(string) for this case.
Correct.
> Have you seen BIOSes with lots of <space>string occurrences?
A fair number, yes, but most of them were thankfully not in strings we
are saving. Also note that most of them only have 1 or 2 leading
spaces, not 8+, so this commit makes no difference for them.
> If so what's the point of allocating memory for the leading spaces?
Presented like that, it sounds like we are silly. But look at things
the other way around: what's the point of storing these leading spaces
in the DMI table in the first place? Whenever this happens, the
hardware manufacturer is to blame, not us.
As much as possible, we should stick to the specification and assume
the other party does as well. Stripping the leading spaces would be
trivial, but hiding their mistakes does not help hardware manufacturers
in the long run anyway. If they can't see that it's wrong, it's harder
for them to fix it.
> IOW, what happens if we strip leading space before allocating?
At boot time, we save a few bytes of memory, on the affected systems
only. No code cost, but possibly some confusion as to why we strip
leading spaces (which are rare) but not trailing spaces (which in my
experience are more frequent).
At run time, exact matching of the affected DMI strings would be
less error-prone (although strings with leading spaces are likely to
also have trailing spaces, annihilating the benefit). Non-exact
matching would be marginally faster, again only on the affected
systems.
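For concreteness, the stripping in question would be a trivial loop at
save time (hypothetical sketch; the helper and variable names are
illustrative, not actual dmi_scan code):

/*
 * Hypothetical only: skip leading spaces before saving a DMI string.
 * Not applied upstream, for the reasons given in this thread.
 */
static const char *skip_leading_spaces(const char *s)
{
        while (*s == ' ')
                s++;                    /* drop the manufacturer's padding */
        return s;
}

/* at save time: allocate only for what remains */
saved = dmi_alloc(strlen(skip_leading_spaces(raw)) + 1);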
In conclusion, it's doable, but the benefit is very small and limited
to a few broken systems, and it has the downside of not discouraging
low-quality tables, so my position is that it's not worth it and not
desirable.
--
Jean Delvare
SUSE L3 Support
This is a note to let you know that I've just added the patch titled
x86/gart: Exclude GART aperture from vmcore
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-gart-exclude-gart-aperture-from-vmcore.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Mon Apr 9 13:58:16 CEST 2018
From: Jiri Bohac <jbohac(a)suse.cz>
Date: Sat, 6 Jan 2018 02:00:13 +0100
Subject: x86/gart: Exclude GART aperture from vmcore
From: Jiri Bohac <jbohac(a)suse.cz>
[ Upstream commit 2a3e83c6f96c513f43ce5a8c9034608ea584a255 ]
On machines where the GART aperture is mapped over physical RAM,
/proc/vmcore contains the remapped range, and reading it may cause hangs
or reboots.
In the past, the GART region was added into the resource map, implemented
by commit 56dd669a138c ("[PATCH] Insert GART region into resource map")
However, inserting the iomem_resource from the early GART code caused
resource conflicts with some AGP drivers (bko#72201), so the change was
reverted in commit 707d4eefbdb3 ("Revert [PATCH] Insert GART
region into resource map"). This revert introduced the /proc/vmcore bug.
The vmcore ELF header is either prepared by the kernel (when using the
kexec_file_load syscall) or by the kexec userspace (when using the kexec_load
syscall). Since we no longer have the GART iomem resource, the userspace
kexec has no way of knowing which region to exclude from the ELF header.
Changes from v1 of this patch:
Instead of excluding the aperture from the ELF header, this patch
makes /proc/vmcore return zeroes in the second kernel when attempting to
read the aperture region. This is done by reusing the
gart_oldmem_pfn_is_ram infrastructure originally intended to exclude Xen
ballooned memory. This works for both the kexec_file_load and kexec_load
syscalls.
[Note that the GART region is the same in the first and second kernels:
regardless of whether the first kernel fixed up the northbridge/BIOS
setting and mapped the aperture over physical memory, the second kernel
finds the northbridge properly configured by the first kernel, and the
aperture never overlaps with e820 memory because the second kernel has a
fake e820 map created from the crashkernel memory regions. Thus, the
second kernel keeps the aperture address/size as configured by the
first kernel.]
register_oldmem_pfn_is_ram() can only register one callback and returns
an error if a callback has already been registered. Since Xen used to be
the only user of this function, the Xen code never checked the return
value. Now that we have more than one user, I added a WARN_ON just in
case agp, Xen, or any other future user of register_oldmem_pfn_is_ram()
were to step on each other's toes.
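For context, the vmcore side consumes this callback roughly as follows
(paraphrased sketch of the fs/proc/vmcore.c read path, not part of this
patch):

/*
 * Sketch: while reading /proc/vmcore, pages that the registered
 * callback reports as not-RAM are returned as zeroes instead of being
 * copied from old memory (which would touch the remapped aperture).
 */
if (pfn_is_ram(pfn) == 0)
        memset(buf, 0, nr_bytes);       /* excluded region reads as zeroes */
else
        tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);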
Fixes: 707d4eefbdb3 ("Revert [PATCH] Insert GART region into resource map")
Signed-off-by: Jiri Bohac <jbohac(a)suse.cz>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Baoquan He <bhe(a)redhat.com>
Cc: Toshi Kani <toshi.kani(a)hpe.com>
Cc: David Airlie <airlied(a)linux.ie>
Cc: yinghai(a)kernel.org
Cc: joro(a)8bytes.org
Cc: kexec(a)lists.infradead.org
Cc: Borislav Petkov <bp(a)alien8.de>
Cc: Bjorn Helgaas <bhelgaas(a)google.com>
Cc: Dave Young <dyoung(a)redhat.com>
Cc: Vivek Goyal <vgoyal(a)redhat.com>
Link: https://lkml.kernel.org/r/20180106010013.73suskgxm7lox7g6@dwarf.suse.cz
Signed-off-by: Sasha Levin <alexander.levin(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
arch/x86/kernel/aperture_64.c | 46 +++++++++++++++++++++++++++++++++++++++++-
arch/x86/xen/mmu_hvm.c | 2 -
2 files changed, 46 insertions(+), 2 deletions(-)
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -30,6 +30,7 @@
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
+#include <linux/crash_dump.h>
/*
* Using 512M as goal, in case kexec will load kernel_big
@@ -56,6 +57,33 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
+#ifdef CONFIG_PROC_VMCORE
+/*
+ * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
+ * use the same range because it will remain configured in the northbridge.
+ * Trying to dump this area via /proc/vmcore may crash the machine, so exclude
+ * it from vmcore.
+ */
+static unsigned long aperture_pfn_start, aperture_page_count;
+
+static int gart_oldmem_pfn_is_ram(unsigned long pfn)
+{
+ return likely((pfn < aperture_pfn_start) ||
+ (pfn >= aperture_pfn_start + aperture_page_count));
+}
+
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+ aperture_pfn_start = aper_base >> PAGE_SHIFT;
+ aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
+ WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram));
+}
+#else
+static void exclude_from_vmcore(u64 aper_base, u32 aper_order)
+{
+}
+#endif
+
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -435,8 +463,16 @@ int __init gart_iommu_hole_init(void)
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base)
+ if (last_aper_base) {
+ /*
+ * If this is the kdump kernel, the first kernel
+ * may have allocated the range over its e820 RAM
+ * and fixed up the northbridge
+ */
+ exclude_from_vmcore(last_aper_base, last_aper_order);
+
return 1;
+ }
return 0;
}
@@ -473,6 +509,14 @@ out:
return 0;
}
+ /*
+ * If this is the kdump kernel _and_ the first kernel did not
+ * configure the aperture in the northbridge, this range may
+ * overlap with the first kernel's memory. We can't access the
+ * range through vmcore even though it should be part of the dump.
+ */
+ exclude_from_vmcore(aper_alloc, aper_order);
+
/* Fix up the north bridges */
for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
int bus, dev_base, dev_limit;
--- a/arch/x86/xen/mmu_hvm.c
+++ b/arch/x86/xen/mmu_hvm.c
@@ -75,6 +75,6 @@ void __init xen_hvm_init_mmu_ops(void)
if (is_pagetable_dying_supported())
pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
#ifdef CONFIG_PROC_VMCORE
- register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
+ WARN_ON(register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram));
#endif
}
Patches currently in stable-queue which might be from jbohac(a)suse.cz are
queue-4.14/x86-gart-exclude-gart-aperture-from-vmcore.patch
This is a note to let you know that I've just added the patch titled
wl1251: check return from call to wl1251_acx_arp_ip_filter
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
wl1251-check-return-from-call-to-wl1251_acx_arp_ip_filter.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Mon Apr 9 13:58:16 CEST 2018
From: Colin Ian King <colin.king(a)canonical.com>
Date: Tue, 26 Dec 2017 17:33:18 +0000
Subject: wl1251: check return from call to wl1251_acx_arp_ip_filter
From: Colin Ian King <colin.king(a)canonical.com>
[ Upstream commit ac1181c60822292176ab96912208ec9f9819faf8 ]
Currently the less-than-zero error check on ret is incorrect,
as it checks a much earlier assignment of ret rather than the
return value of the call to wl1251_acx_arp_ip_filter. Fix this by
adding the missing assignment.
Detected by CoverityScan, CID#1164835 ("Logically dead code")
Fixes: 204cc5c44fb6 ("wl1251: implement hardware ARP filtering")
Signed-off-by: Colin Ian King <colin.king(a)canonical.com>
Signed-off-by: Kalle Valo <kvalo(a)codeaurora.org>
Signed-off-by: Sasha Levin <alexander.levin(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/net/wireless/ti/wl1251/main.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(s
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
Patches currently in stable-queue which might be from colin.king(a)canonical.com are
queue-4.14/wl1251-check-return-from-call-to-wl1251_acx_arp_ip_filter.patch
This is a note to let you know that I've just added the patch titled
watchdog: dw_wdt: add stop watchdog operation
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
watchdog-dw_wdt-add-stop-watchdog-operation.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Mon Apr 9 13:58:16 CEST 2018
From: Oleksij Rempel <o.rempel(a)pengutronix.de>
Date: Tue, 26 Sep 2017 08:11:22 +0200
Subject: watchdog: dw_wdt: add stop watchdog operation
From: Oleksij Rempel <o.rempel(a)pengutronix.de>
[ Upstream commit 1bfe8889380890efe4943d125124f5a7b48571b0 ]
The only way of stopping the watchdog is by resetting it.
Add the watchdog op for stopping the device and reset it if
a reset line is provided.
At the same time, WDOG_HW_RUNNING should be removed from dw_wdt_start.
As commented by Guenter Roeck:
dw_wdt sets WDOG_HW_RUNNING in its open function. The result is
that the kref_get() in watchdog_open() won't be executed, but the
kref_put() in close will then be called since the watchdog now does
stop. This causes the imbalance.
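To illustrate the pairing (paraphrased pseudocode of the watchdog core's
refcounting, not exact kernel source):

/*
 * Sketch of the refcount pairing in the watchdog core:
 *
 * watchdog_open():
 *         if (!watchdog_hw_running(wdd))
 *                 kref_get(&wd_data->kref);
 *
 * watchdog_release():
 *         ...stops the device...
 *         if (!watchdog_hw_running(wdd))
 *                 kref_put(&wd_data->kref, ...);
 *
 * With WDOG_HW_RUNNING set in start(), open() skips the kref_get(),
 * but once a real ->stop() exists, release() drops a reference it
 * never took, underflowing the count.
 */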
Signed-off-by: Oleksij Rempel <o.rempel(a)pengutronix.de>
Cc: Wim Van Sebroeck <wim(a)iguana.be>
Cc: Guenter Roeck <linux(a)roeck-us.net>
Cc: linux-watchdog(a)vger.kernel.org
Reviewed-by: Guenter Roeck <linux(a)roeck-us.net>
Signed-off-by: Guenter Roeck <linux(a)roeck-us.net>
Signed-off-by: Wim Van Sebroeck <wim(a)iguana.be>
Signed-off-by: Sasha Levin <alexander.levin(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/watchdog/dw_wdt.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -127,14 +127,27 @@ static int dw_wdt_start(struct watchdog_
dw_wdt_set_timeout(wdd, wdd->timeout);
- set_bit(WDOG_HW_RUNNING, &wdd->status);
-
writel(WDOG_CONTROL_REG_WDT_EN_MASK,
dw_wdt->regs + WDOG_CONTROL_REG_OFFSET);
return 0;
}
+static int dw_wdt_stop(struct watchdog_device *wdd)
+{
+ struct dw_wdt *dw_wdt = to_dw_wdt(wdd);
+
+ if (!dw_wdt->rst) {
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ return 0;
+ }
+
+ reset_control_assert(dw_wdt->rst);
+ reset_control_deassert(dw_wdt->rst);
+
+ return 0;
+}
+
static int dw_wdt_restart(struct watchdog_device *wdd,
unsigned long action, void *data)
{
@@ -173,6 +186,7 @@ static const struct watchdog_info dw_wdt
static const struct watchdog_ops dw_wdt_ops = {
.owner = THIS_MODULE,
.start = dw_wdt_start,
+ .stop = dw_wdt_stop,
.ping = dw_wdt_ping,
.set_timeout = dw_wdt_set_timeout,
.get_timeleft = dw_wdt_get_timeleft,
Patches currently in stable-queue which might be from o.rempel(a)pengutronix.de are
queue-4.14/watchdog-dw_wdt-add-stop-watchdog-operation.patch
This is a note to let you know that I've just added the patch titled
VFS: close race between getcwd() and d_move()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
vfs-close-race-between-getcwd-and-d_move.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable(a)vger.kernel.org> know about it.
From foo@baz Mon Apr 9 13:58:16 CEST 2018
From: NeilBrown <neilb(a)suse.com>
Date: Fri, 10 Nov 2017 15:45:41 +1100
Subject: VFS: close race between getcwd() and d_move()
From: NeilBrown <neilb(a)suse.com>
[ Upstream commit 61647823aa920e395afcce4b57c32afb51456cab ]
d_move() will call __d_drop() and then __d_rehash()
on the dentry being moved. This creates a small window
when the dentry appears to be unhashed. Many tests
of d_unhashed() are made under ->d_lock and so are safe
from racing with this window, but some aren't.
In particular, getcwd() calls d_unlinked() (which calls
d_unhashed()) without d_lock protection, so it can race.
This race has been seen in practice with Lustre, which uses d_move() as
part of name lookup. See:
https://jira.hpdd.intel.com/browse/LU-9735
It could race with a regular rename(), and result in ENOENT instead
of either the 'before' or 'after' name.
The race can be demonstrated with a simple program which
has two threads, one renaming a directory back and forth
while another calls getcwd() within that directory: it should never
fail, but does. See:
https://patchwork.kernel.org/patch/9455345/
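A reconstructed sketch of such a reproducer (not the exact program from
the link above; error handling trimmed, compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static void *renamer(void *arg)
{
	(void)arg;
	for (;;) {
		/* flip the directory's name back and forth, via the parent */
		rename("../a", "../b");
		rename("../b", "../a");
	}
	return NULL;
}

int main(void)
{
	char buf[4096];
	pthread_t t;

	mkdir("a", 0755);
	if (chdir("a"))			/* cwd is the directory being renamed */
		return 1;
	pthread_create(&t, NULL, renamer, NULL);
	for (;;) {
		if (!getcwd(buf, sizeof(buf))) {
			perror("getcwd");	/* pre-patch: fails with ENOENT */
			return 1;
		}
	}
}

On an affected kernel the getcwd() loop soon fails with ENOENT; with the
fix it runs indefinitely.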
We could fix this race by taking d_lock and rechecking when
d_unhashed() reports true. Alternatively, we can remove the window,
which is the approach this patch takes.
___d_drop() is introduced, which does *not* clear d_hash.pprev,
so the dentry still appears to be hashed. __d_drop() calls
___d_drop(), then clears d_hash.pprev.
__d_move() now uses ___d_drop() and only clears d_hash.pprev
when not rehashing.
Signed-off-by: NeilBrown <neilb(a)suse.com>
Signed-off-by: Al Viro <viro(a)zeniv.linux.org.uk>
Signed-off-by: Sasha Levin <alexander.levin(a)microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
fs/dcache.c | 23 ++++++++++++++++-------
1 file changed, 16 insertions(+), 7 deletions(-)
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -468,9 +468,11 @@ static void dentry_lru_add(struct dentry
* d_drop() is used mainly for stuff that wants to invalidate a dentry for some
* reason (NFS timeouts or autofs deletes).
*
- * __d_drop requires dentry->d_lock.
+ * __d_drop requires dentry->d_lock
+ * ___d_drop doesn't mark dentry as "unhashed"
+ * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
*/
-void __d_drop(struct dentry *dentry)
+static void ___d_drop(struct dentry *dentry)
{
if (!d_unhashed(dentry)) {
struct hlist_bl_head *b;
@@ -486,12 +488,17 @@ void __d_drop(struct dentry *dentry)
hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
- dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
/* After this call, in-progress rcu-walk path lookup will fail. */
write_seqcount_invalidate(&dentry->d_seq);
}
}
+
+void __d_drop(struct dentry *dentry)
+{
+ ___d_drop(dentry);
+ dentry->d_hash.pprev = NULL;
+}
EXPORT_SYMBOL(__d_drop);
void d_drop(struct dentry *dentry)
@@ -2386,7 +2393,7 @@ EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
struct hlist_bl_head *b = d_hash(entry->d_name.hash);
- BUG_ON(!d_unhashed(entry));
+
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
@@ -2821,9 +2828,9 @@ static void __d_move(struct dentry *dent
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
/* unhash both */
- /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
- __d_drop(dentry);
- __d_drop(target);
+ /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
+ ___d_drop(dentry);
+ ___d_drop(target);
/* Switch the names.. */
if (exchange)
@@ -2835,6 +2842,8 @@ static void __d_move(struct dentry *dent
__d_rehash(dentry);
if (exchange)
__d_rehash(target);
+ else
+ target->d_hash.pprev = NULL;
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
Patches currently in stable-queue which might be from neilb(a)suse.com are
queue-4.14/staging-lustre-disable-preempt-while-sampling-processor-id.patch
queue-4.14/vfs-close-race-between-getcwd-and-d_move.patch