Detect when a directory entry is (possibly partially) beyond the directory
size and return EIO in that case, since it means the filesystem is
corrupted. Otherwise, directory operations can further corrupt the
directory and possibly also oops the kernel.
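For illustration, a minimal user-space sketch of the bounds check being added
(the names below are made up; this is not fs/udf code):

/*
 * Illustrative sketch of the check: once the position after the entry
 * just parsed exceeds the directory size, the entry lies at least
 * partially outside the directory, i.e. the filesystem is corrupted.
 */
#include <stdio.h>

static int entry_within_dir(long long pos_after_entry, long long dir_size)
{
	/* Got last entry outside of dir size - fs is corrupted! */
	if (pos_after_entry > dir_size)
		return 0;
	return 1;
}

int main(void)
{
	long long dir_size = 8192;

	printf("entry ending at 4096: %s\n",
	       entry_within_dir(4096, dir_size) ? "ok" : "corrupted");
	printf("entry ending at 8200: %s\n",
	       entry_within_dir(8200, dir_size) ? "ok" : "corrupted");
	return 0;
}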
CC: Anatoly Trosinenko <anatoly.trosinenko(a)gmail.com>
CC: stable(a)vger.kernel.org
Reported-by: Anatoly Trosinenko <anatoly.trosinenko(a)gmail.com>
Signed-off-by: Jan Kara <jack(a)suse.cz>
---
fs/udf/directory.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 0a98a2369738..3835f983cc99 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -152,6 +152,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sizeof(struct fileIdentDesc));
}
}
+ /* Got last entry outside of dir size - fs is corrupted! */
+ if (*nf_pos > dir->i_size)
+ return NULL;
return fi;
}
--
2.16.4
SysRq-L and the RCU stall detector call arch_trigger_cpumask_backtrace() to
trigger backtraces on other CPUs, but the MIPS implementation is broken. The
root cause is that arch_trigger_cpumask_backtrace() uses a call-function IPI
from irq context, which triggers deadlocks in smp_call_function_single() and
smp_call_function_many().
This patch fixes arch_trigger_cpumask_backtrace() by:
1. Using a dedicated IPI (SMP_CPU_BACKTRACE) to trigger backtraces;
2. If the current CPU is in the target cpumask, dumping its backtrace directly and clearing it from the mask;
3. Using a spinlock to serialize backtrace output from different CPUs;
4. Handling the SMP_CPU_BACKTRACE IPI for Loongson-3.
I attempted to implement SMP_CPU_BACKTRACE for all MIPS CPUs, but failed
because some of their IPI mechanisms are not extensible. :(
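For illustration only (this handler and the non-backtrace action bits it
handles are assumptions, not part of this patch), a sketch of how another
MIPS platform's IPI dispatcher could wire up the new action bit, mirroring
the Loongson-3 hunk below:

#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/smp.h>

/* Hypothetical platform IPI dispatcher handling SMP_CPU_BACKTRACE. */
void example_platform_ipi_interrupt(unsigned int action)
{
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}

	if (action & SMP_CPU_BACKTRACE) {
		irq_enter();
		arch_dump_stack();
		irq_exit();
	}
}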
Cc: stable(a)vger.kernel.org
Signed-off-by: Huacai Chen <chenhc(a)lemote.com>
---
arch/mips/include/asm/smp.h | 3 +++
arch/mips/kernel/process.c | 23 ++++++++++++++++++-----
arch/mips/loongson64/loongson-3/smp.c | 6 ++++++
3 files changed, 27 insertions(+), 5 deletions(-)
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 88ebd83..b0521f4 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -43,6 +43,7 @@ extern int __cpu_logical_map[NR_CPUS];
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
#define SMP_ASK_C0COUNT 0x8
+#define SMP_CPU_BACKTRACE 0x10
/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;
@@ -81,6 +82,8 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
+void arch_dump_stack(void);
+
/*
* This function will set up the necessary IPIs for Linux to communicate
* with the CPUs in mask.
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 57028d4..647e15d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -655,26 +655,39 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
-static void arch_dump_stack(void *info)
+void arch_dump_stack(void)
{
struct pt_regs *regs;
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ arch_spin_lock(&lock);
regs = get_irq_regs();
if (regs)
show_regs(regs);
dump_stack();
+ arch_spin_unlock(&lock);
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
long this_cpu = get_cpu();
+ struct cpumask backtrace_mask;
+ extern const struct plat_smp_ops *mp_ops;
+
+ cpumask_copy(&backtrace_mask, mask);
+ if (cpumask_test_cpu(this_cpu, mask)) {
+ if (!exclude_self) {
+ struct pt_regs *regs = get_irq_regs();
+ if (regs)
+ show_regs(regs);
+ dump_stack();
+ }
+ cpumask_clear_cpu(this_cpu, &backtrace_mask);
+ }
- if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
- dump_stack();
-
- smp_call_function_many(mask, arch_dump_stack, NULL, 1);
+ mp_ops->send_ipi_mask(&backtrace_mask, SMP_CPU_BACKTRACE);
put_cpu();
}
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 8501109..0655114 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -291,6 +291,12 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
__wbflush(); /* Let others see the result ASAP */
}
+ if (action & SMP_CPU_BACKTRACE) {
+ irq_enter();
+ arch_dump_stack();
+ irq_exit();
+ }
+
if (irqs) {
int irq;
while ((irq = ffs(irqs))) {
--
2.7.0
The patch titled
Subject: slub: fix failure when we delete and create a slab cache
has been added to the -mm tree. Its filename is
slub-fix-failure-when-we-delete-and-create-a-slab-cache.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/slub-fix-failure-when-we-delete-an…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/slub-fix-failure-when-we-delete-an…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Mikulas Patocka <mpatocka(a)redhat.com>
Subject: slub: fix failure when we delete and create a slab cache
In kernel 4.17 I removed some code from dm-bufio that did slab cache
merging (21bb13276768) - both slab and slub support merging caches with
identical attributes, so dm-bufio now just calls kmem_cache_create and
relies on implicit merging.
This uncovered a bug in the slub subsystem - if we delete a cache and
immediately create another cache with the same attributes, the creation
fails because of a duplicate filename in /sys/kernel/slab/. The slub
subsystem offloads freeing the cache to a workqueue - and if we create the
new cache before the workqueue runs, sysfs complains about the duplicate
filename.
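As a minimal sketch of the trigger pattern (the module code below is
illustrative, not taken from dm-bufio or this patch):

#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Destroy a cache and immediately create another one with the same
 * attributes. Before this fix, kobject_del() ran later from a
 * workqueue, so the second create could still find the old entry in
 * /sys/kernel/slab/ and fail.
 */
static int example_recreate_cache(void)
{
	struct kmem_cache *c;

	c = kmem_cache_create("example-buffer", 16384, 0,
			      SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c)
		return -ENOMEM;
	kmem_cache_destroy(c);

	/* Recreate with identical attributes right away. */
	c = kmem_cache_create("example-buffer", 16384, 0,
			      SLAB_RECLAIM_ACCOUNT, NULL);
	if (!c)
		return -ENOMEM;	/* could happen before this fix */
	kmem_cache_destroy(c);
	return 0;
}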
This patch fixes the bug by moving the kobject_del() call from
sysfs_slab_remove_workfn() to shutdown_cache(). kobject_del() must be called
while we hold slab_mutex - so that the sysfs entry is deleted before a
cache with the same attributes can be created.
Running device-mapper-test-suite with:
dmtest run --suite thin-provisioning -n /commit_failure_causes_fallback/
triggers:
[ 119.618958] Buffer I/O error on dev dm-0, logical block 1572848, async page read
[ 119.686224] device-mapper: thin: 253:1: metadata operation 'dm_pool_alloc_data_block' failed: error = -5
[ 119.695821] device-mapper: thin: 253:1: aborting current metadata transaction
[ 119.703255] sysfs: cannot create duplicate filename '/kernel/slab/:a-0000144'
[ 119.710394] CPU: 2 PID: 1037 Comm: kworker/u48:1 Not tainted 4.17.0.snitm+ #25
[ 119.717608] Hardware name: Supermicro SYS-1029P-WTR/X11DDW-L, BIOS 2.0a 12/06/2017
[ 119.725177] Workqueue: dm-thin do_worker [dm_thin_pool]
[ 119.730401] Call Trace:
[ 119.732856] dump_stack+0x5a/0x73
[ 119.736173] sysfs_warn_dup+0x58/0x70
[ 119.739839] sysfs_create_dir_ns+0x77/0x80
[ 119.743939] kobject_add_internal+0xba/0x2e0
[ 119.748210] kobject_init_and_add+0x70/0xb0
[ 119.752399] ? sysfs_slab_add+0x101/0x250
[ 119.756409] sysfs_slab_add+0xb1/0x250
[ 119.760161] __kmem_cache_create+0x116/0x150
[ 119.764436] ? number+0x2fb/0x340
[ 119.767755] ? _cond_resched+0x15/0x30
[ 119.771508] create_cache+0xd9/0x1f0
[ 119.775085] kmem_cache_create_usercopy+0x1c1/0x250
[ 119.779965] kmem_cache_create+0x18/0x20
[ 119.783894] dm_bufio_client_create+0x1ae/0x410 [dm_bufio]
[ 119.789380] ? dm_block_manager_alloc_callback+0x20/0x20 [dm_persistent_data]
[ 119.796509] ? kmem_cache_alloc_trace+0xae/0x1d0
[ 119.801131] dm_block_manager_create+0x5e/0x90 [dm_persistent_data]
[ 119.807397] __create_persistent_data_objects+0x38/0x940 [dm_thin_pool]
[ 119.814008] dm_pool_abort_metadata+0x64/0x90 [dm_thin_pool]
[ 119.819669] metadata_operation_failed+0x59/0x100 [dm_thin_pool]
[ 119.825673] alloc_data_block.isra.53+0x86/0x180 [dm_thin_pool]
[ 119.831592] process_cell+0x2a3/0x550 [dm_thin_pool]
[ 119.836558] ? mempool_alloc+0x6f/0x180
[ 119.840400] ? u32_swap+0x10/0x10
[ 119.843717] ? sort+0x17b/0x270
[ 119.846863] ? u32_swap+0x10/0x10
[ 119.850181] do_worker+0x28d/0x8f0 [dm_thin_pool]
[ 119.854890] ? move_linked_works+0x6f/0xa0
[ 119.858989] process_one_work+0x171/0x370
[ 119.862999] worker_thread+0x49/0x3f0
[ 119.866669] kthread+0xf8/0x130
[ 119.869813] ? max_active_store+0x80/0x80
[ 119.873827] ? kthread_bind+0x10/0x10
[ 119.877493] ret_from_fork+0x35/0x40
[ 119.881076] kobject_add_internal failed for :a-0000144 with -EEXIST, don't try to register things with the same name in the same directory.
[ 119.893580] kmem_cache_create(dm_bufio_buffer-16) failed with error -17
Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1806151817130.6333@file01.intranet…
Signed-off-by: Mikulas Patocka <mpatocka(a)redhat.com>
Reported-by: Mike Snitzer <snitzer(a)redhat.com>
Tested-by: Mike Snitzer <snitzer(a)redhat.com>
Cc: Christoph Lameter <cl(a)linux.com>
Cc: Pekka Enberg <penberg(a)kernel.org>
Cc: David Rientjes <rientjes(a)google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim(a)lge.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
include/linux/slub_def.h | 4 ++++
mm/slab_common.c | 4 ++++
mm/slub.c | 7 ++++++-
3 files changed, 14 insertions(+), 1 deletion(-)
diff -puN include/linux/slub_def.h~slub-fix-failure-when-we-delete-and-create-a-slab-cache include/linux/slub_def.h
--- a/include/linux/slub_def.h~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
diff -puN mm/slab_common.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache mm/slab_common.c
--- a/mm/slab_common.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/mm/slab_common.c
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_ca
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
+#endif
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
sysfs_slab_release(s);
#else
slab_kmem_cache_release(s);
diff -puN mm/slub.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache mm/slub.c
--- a/mm/slub.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(str
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
- kobject_del(&s->kobj);
out:
kobject_put(&s->kobj);
}
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kme
schedule_work(&s->kobj_remove_work);
}
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+ if (slab_state >= FULL)
+ kobject_del(&s->kobj);
+}
+
void sysfs_slab_release(struct kmem_cache *s)
{
if (slab_state >= FULL)
_
Patches currently in -mm which might be from mpatocka(a)redhat.com are
slub-fix-failure-when-we-delete-and-create-a-slab-cache.patch
The patch titled
Subject: mm: fix devmem_is_allowed() for sub-page System RAM intersections
has been removed from the -mm tree. Its filename was
mm-fix-devmem_is_allowed-for-sub-page-system-ram-intersections.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Dan Williams <dan.j.williams(a)intel.com>
Subject: mm: fix devmem_is_allowed() for sub-page System RAM intersections
Hussam reports:
I was poking around and for no real reason, I did cat /dev/mem and
strings /dev/mem. Then I saw the following warning in dmesg. I saved it
and rebooted immediately.
memremap attempted on mixed range 0x000000000009c000 size: 0x1000
------------[ cut here ]------------
WARNING: CPU: 0 PID: 11810 at kernel/memremap.c:98 memremap+0x104/0x170
[..]
Call Trace:
xlate_dev_mem_ptr+0x25/0x40
read_mem+0x89/0x1a0
__vfs_read+0x36/0x170
The memremap() implementation checks for attempts to remap System RAM with
MEMREMAP_WB and instead redirects those mapping attempts to the linear
map. However, that only works if the physical address range being
remapped is page aligned. In low memory we have situations like the
following:
00000000-00000fff : Reserved
00001000-0009fbff : System RAM
0009fc00-0009ffff : Reserved
...where System RAM intersects Reserved ranges at sub-page granularity.
Given that devmem_is_allowed() special cases any attempt to map System RAM
in the first 1MB of memory, replace page_is_ram() with the more precise
region_intersects() to trap attempts to map disallowed ranges.
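For illustration, a small user-space sketch (not kernel code; the layout
values are taken from the example above) of why a precise intersection test
sees a "mixed" page where a page-granular RAM check would not:

#include <stdio.h>

struct range { unsigned long long start, end; };	/* inclusive bounds */

static int intersects(struct range a, struct range b)
{
	return a.start <= b.end && b.start <= a.end;
}

int main(void)
{
	struct range ram      = { 0x1000,  0x9fbff };	/* System RAM */
	struct range reserved = { 0x9fc00, 0x9ffff };	/* Reserved */
	struct range page9f   = { 0x9f000, 0x9ffff };	/* PFN 0x9f, 4K page */

	/* The page contains both RAM and Reserved bytes, i.e. it is mixed. */
	printf("page 0x9f intersects RAM:      %d\n", intersects(page9f, ram));
	printf("page 0x9f intersects Reserved: %d\n", intersects(page9f, reserved));
	return 0;
}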
Link: https://bugzilla.kernel.org/show_bug.cgi?id=199999
Link: http://lkml.kernel.org/r/152856436164.18127.2847888121707136898.stgit@dwill…
Fixes: 92281dee825f ("arch: introduce memremap()")
Signed-off-by: Dan Williams <dan.j.williams(a)intel.com>
Reported-by: Hussam Al-Tayeb <me(a)hussam.eu.org>
Tested-by: Hussam Al-Tayeb <me(a)hussam.eu.org>
Cc: Christoph Hellwig <hch(a)lst.de>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
arch/x86/mm/init.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff -puN arch/x86/mm/init.c~mm-fix-devmem_is_allowed-for-sub-page-system-ram-intersections arch/x86/mm/init.c
--- a/arch/x86/mm/init.c~mm-fix-devmem_is_allowed-for-sub-page-system-ram-intersections
+++ a/arch/x86/mm/init.c
@@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (page_is_ram(pagenr)) {
+ if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
+ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ != REGION_DISJOINT) {
/*
* For disallowed memory regions in the low 1MB range,
* request that the page be shown as all zeros.
_
Patches currently in -mm which might be from dan.j.williams(a)intel.com are
mm-devm_memremap_pages-mark-devm_memremap_pages-export_symbol_gpl.patch
mm-devm_memremap_pages-handle-errors-allocating-final-devres-action.patch
mm-hmm-use-devm-semantics-for-hmm_devmem_add-remove.patch
mm-hmm-replace-hmm_devmem_pages_create-with-devm_memremap_pages.patch
mm-hmm-mark-hmm_devmem_add-add_resource-export_symbol_gpl.patch