The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From e935dba111621bd6a0c5d48e6511a4d9885103b4 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski <m.szyprowski(a)samsung.com>
Date: Wed, 16 May 2018 10:42:39 +0200
Subject: [PATCH] spi: spi-s3c64xx: Fix system resume support
Since the Linux v4.10 release (commit 1d9174fbc55e "PM / Runtime: Defer
resuming of the device in pm_runtime_force_resume()"), the
pm_runtime_force_resume() function doesn't runtime-resume the device if it
was not runtime-active before system suspend. Thus, the driver must not do
any register access after pm_runtime_force_resume() without checking the
runtime status of the device. To fix this issue, simply move the
s3c64xx_spi_hwinit() call to s3c64xx_spi_runtime_resume() to ensure that
the hardware is always properly initialized. This fixes a Synchronous
External Abort on system suspend/resume cycles on newer Exynos SoCs.
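For context, here is a minimal sketch of the constraint described above (a
hypothetical "foo" driver, not the s3c64xx code): after
pm_runtime_force_resume(), registers may only be touched if the device
really is runtime-active, which pm_runtime_status_suspended() can tell us.

#include <linux/pm_runtime.h>

static void foo_spi_hwinit(struct device *dev)
{
	/* reprogram controller registers (placeholder for the sketch) */
}

static int foo_spi_resume(struct device *dev)
{
	int ret = pm_runtime_force_resume(dev);

	if (ret < 0)
		return ret;

	/*
	 * Since v4.10, pm_runtime_force_resume() may leave the device
	 * runtime-suspended; only touch registers when it is active.
	 */
	if (!pm_runtime_status_suspended(dev))
		foo_spi_hwinit(dev);

	return 0;
}

The fix below takes the simpler route: s3c64xx_spi_hwinit() moves into the
runtime-resume callback, so the hardware is reinitialized whenever the
device actually powers back up.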
Signed-off-by: Marek Szyprowski <m.szyprowski(a)samsung.com>
Reviewed-by: Krzysztof Kozlowski <krzk(a)kernel.org>
Signed-off-by: Mark Brown <broonie(a)kernel.org>
Cc: stable(a)vger.kernel.org
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index f55dc78957ad..7b7151ec14c8 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1292,8 +1292,6 @@ static int s3c64xx_spi_resume(struct device *dev)
if (ret < 0)
return ret;
- s3c64xx_spi_hwinit(sdd);
-
return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
@@ -1331,6 +1329,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev)
if (ret != 0)
goto err_disable_src_clk;
+ s3c64xx_spi_hwinit(sdd);
+
return 0;
err_disable_src_clk:
We were copying our last cipher block into the request for use as the IV
for all modes of operation. Fix this by discerning the behaviour based on
the mode of operation used: copy the ciphertext for CBC, update the counter
for CTR.
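As a standalone illustration of the CTR bookkeeping (a userspace sketch,
not the driver's cc_update_ctr below): the counter must advance by the
number of AES blocks processed, carrying overflow into the higher bytes of
the big-endian counter.

#include <stdint.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

/* Advance a big-endian 128-bit counter by 'blocks' (portable byte-wise form). */
static void ctr_add(uint8_t ctr[AES_BLOCK_SIZE], unsigned int blocks)
{
	while (blocks--) {
		for (int i = AES_BLOCK_SIZE - 1; i >= 0; i--)
			if (++ctr[i])
				break;
	}
}

int main(void)
{
	uint8_t iv[AES_BLOCK_SIZE] = { [15] = 0xfe };
	unsigned int cryptlen = 33;	/* 33 bytes -> 3 AES blocks */

	/* Same rounding as the patch: ALIGN(cryptlen, 16) / 16 */
	ctr_add(iv, (cryptlen + AES_BLOCK_SIZE - 1) / AES_BLOCK_SIZE);
	printf("counter tail: %02x %02x\n", iv[14], iv[15]);	/* 01 01: 0xfe + 3 with carry */
	return 0;
}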
CC: stable(a)vger.kernel.org
Fixes: 63ee04c8b491 ("crypto: ccree - add skcipher support")
Reported-by: Hadar Gat <hadar.gat(a)arm.com>
Signed-off-by: Gilad Ben-Yossef <gilad(a)benyossef.com>
---
drivers/crypto/ccree/cc_cipher.c | 111 +++++++++++++++++++++++++++++----------
1 file changed, 84 insertions(+), 27 deletions(-)
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index d2810c1..958ced3 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -593,34 +593,82 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
}
}
+/*
+ * Update a CTR-AES 128 bit counter
+ */
+static void cc_update_ctr(u8 *ctr, unsigned int increment)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ IS_ALIGNED((unsigned long)ctr, 8)) {
+
+ __be64 *high_be = (__be64 *)ctr;
+ __be64 *low_be = high_be + 1;
+ u64 orig_low = __be64_to_cpu(*low_be);
+ u64 new_low = orig_low + (u64)increment;
+
+ *low_be = __cpu_to_be64(new_low);
+
+ if (new_low < orig_low)
+ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
+ } else {
+ u8 *pos = (ctr + AES_BLOCK_SIZE);
+ u8 val;
+ unsigned int size;
+
+ for (; increment; increment--)
+ for (size = AES_BLOCK_SIZE; size; size--) {
+ val = *--pos + 1;
+ *pos = val;
+ if (val)
+ break;
+ }
+ }
+}
+
static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
struct skcipher_request *req = (struct skcipher_request *)cc_req;
struct scatterlist *dst = req->dst;
struct scatterlist *src = req->src;
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+ struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
+ unsigned int len;
- cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
- kzfree(req_ctx->iv);
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CBC:
+ /*
+ * The crypto API expects us to set the req->iv to the last
+ * ciphertext block. For encrypt, simply copy from the result.
+ * For decrypt, we must copy from a saved buffer since this
+ * could be an in-place decryption operation and the src is
+ * lost by this point.
+ */
+ if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+ memcpy(req->iv, req_ctx->backup_info, ivsize);
+ kzfree(req_ctx->backup_info);
+ } else if (!err) {
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req->iv, req->dst, len,
+ ivsize, 0);
+ }
+ break;
- /*
- * The crypto API expects us to set the req->iv to the last
- * ciphertext block. For encrypt, simply copy from the result.
- * For decrypt, we must copy from a saved buffer since this
- * could be an in-place decryption operation and the src is
- * lost by this point.
- */
- if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
- memcpy(req->iv, req_ctx->backup_info, ivsize);
- kzfree(req_ctx->backup_info);
- } else if (!err) {
- scatterwalk_map_and_copy(req->iv, req->dst,
- (req->cryptlen - ivsize),
- ivsize, 0);
+ case DRV_CIPHER_CTR:
+ /* Compute the counter of the last block */
+ len = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE;
+ cc_update_ctr((u8 *)req->iv, len);
+ break;
+
+ default:
+ break;
}
+ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
+ kzfree(req_ctx->iv);
+
skcipher_request_complete(req, err);
}
@@ -752,20 +800,29 @@ static int cc_cipher_encrypt(struct skcipher_request *req)
static int cc_cipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
gfp_t flags = cc_gfp_flags(&req->base);
+ unsigned int len;
- /*
- * Allocate and save the last IV sized bytes of the source, which will
- * be lost in case of in-place decryption and might be needed for CTS.
- */
- req_ctx->backup_info = kmalloc(ivsize, flags);
- if (!req_ctx->backup_info)
- return -ENOMEM;
+ if (ctx_p->cipher_mode == DRV_CIPHER_CBC) {
+
+ /* Allocate and save the last IV sized bytes of the source,
+ * which will be lost in case of in-place decryption.
+ */
+ req_ctx->backup_info = kzalloc(ivsize, flags);
+ if (!req_ctx->backup_info)
+ return -ENOMEM;
+
+ len = req->cryptlen - ivsize;
+ scatterwalk_map_and_copy(req_ctx->backup_info, req->src, len,
+ ivsize, 0);
+ } else {
+ req_ctx->backup_info = NULL;
+ }
- scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
- (req->cryptlen - ivsize), ivsize, 0);
req_ctx->is_giv = false;
return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
--
2.7.4
The pinctrl settings were incorrect for the touchscreen interrupt line, causing
an interrupt storm. This change has been tested with both the atmel_mxt_ts and
RMI4 drivers on the RDU1 units.
The value 0x4 comes from the value of register IOMUXC_SW_PAD_CTL_PAD_CSI1_D8
from the old vendor kernel.
Signed-off-by: Nick Dyer <nick(a)shmanahar.org>
Fixes: ceef0396f367 ("ARM: dts: imx: add ZII RDU1 board")
Cc: <stable(a)vger.kernel.org> # 4.15+
---
Changes in v3:
- Update commit message to add source of 0x4 value, fixes tag and CC stable
Changes in v2:
- Use hex, only alter IRQ line config
arch/arm/boot/dts/imx51-zii-rdu1.dts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index df9eca94d812..8a878687197b 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -770,7 +770,7 @@
pinctrl_ts: tsgrp {
fsl,pins = <
- MX51_PAD_CSI1_D8__GPIO3_12 0x85
+ MX51_PAD_CSI1_D8__GPIO3_12 0x04
MX51_PAD_CSI1_D9__GPIO3_13 0x85
>;
};
--
2.17.1
The patch titled
Subject: kasan: depend on CONFIG_SLUB_DEBUG
has been removed from the -mm tree. Its filename was
kasan-depend-on-config_slub_debug.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: "Jason A. Donenfeld" <Jason(a)zx2c4.com>
Subject: kasan: depend on CONFIG_SLUB_DEBUG
KASAN depends on having access to some of the accounting that SLUB_DEBUG
does; without it, there are immediate crashes [1]. So, the natural thing
to do is to make KASAN select SLUB_DEBUG.
[1] http://lkml.kernel.org/r/CAHmME9rtoPwxUSnktxzKso14iuVCWT7BE_-_8PAC=pGw1iJnQ…
Link: http://lkml.kernel.org/r/20180622154623.25388-1-Jason@zx2c4.com
Fixes: f9e13c0a5a33 ("slab, slub: skip unnecessary kasan_cache_shutdown()")
Signed-off-by: Jason A. Donenfeld <Jason(a)zx2c4.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Reviewed-by: Shakeel Butt <shakeelb(a)google.com>
Acked-by: Christoph Lameter <cl(a)linux.com>
Cc: Shakeel Butt <shakeelb(a)google.com>
Cc: David Rientjes <rientjes(a)google.com>
Cc: Pekka Enberg <penberg(a)kernel.org>
Cc: Joonsoo Kim <iamjoonsoo.kim(a)lge.com>
Cc: Andrey Ryabinin <aryabinin(a)virtuozzo.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
diff -puN lib/Kconfig.kasan~kasan-depend-on-config_slub_debug lib/Kconfig.kasan
--- a/lib/Kconfig.kasan~kasan-depend-on-config_slub_debug
+++ a/lib/Kconfig.kasan
@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
config KASAN
bool "KASan: runtime memory debugger"
depends on SLUB || (SLAB && !DEBUG_SLAB)
+ select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
_
Patches currently in -mm which might be from Jason(a)zx2c4.com are
The patch titled
Subject: x86/e820: put !E820_TYPE_RAM regions into memblock.reserved
has been removed from the -mm tree. Its filename was
x86-e820-put-e820_type_ram-regions-into-memblockreserved.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Naoya Horiguchi <n-horiguchi(a)ah.jp.nec.com>
Subject: x86/e820: put !E820_TYPE_RAM regions into memblock.reserved
There is a kernel panic that is triggered when reading /proc/kpageflags on
a kernel booted with the kernel parameter 'memmap=nn[KMG]!ss[KMG]':
BUG: unable to handle kernel paging request at fffffffffffffffe
PGD 9b20e067 P4D 9b20e067 PUD 9b210067 PMD 0
Oops: 0000 [#1] SMP PTI
CPU: 2 PID: 1728 Comm: page-types Not tainted 4.17.0-rc6-mm1-v4.17-rc6-180605-0816-00236-g2dfb086ef02c+ #160
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.11.0-2.fc28 04/01/2014
RIP: 0010:stable_page_flags+0x27/0x3c0
Code: 00 00 00 0f 1f 44 00 00 48 85 ff 0f 84 a0 03 00 00 41 54 55 49 89 fc 53 48 8b 57 08 48 8b 2f 48 8d 42 ff 83 e2 01 48 0f 44 c7 <48> 8b 00 f6 c4 01 0f 84 10 03 00 00 31 db 49 8b 54 24 08 4c 89 e7
RSP: 0018:ffffbbd44111fde0 EFLAGS: 00010202
RAX: fffffffffffffffe RBX: 00007fffffffeff9 RCX: 0000000000000000
RDX: 0000000000000001 RSI: 0000000000000202 RDI: ffffed1182fff5c0
RBP: ffffffffffffffff R08: 0000000000000001 R09: 0000000000000001
R10: ffffbbd44111fed8 R11: 0000000000000000 R12: ffffed1182fff5c0
R13: 00000000000bffd7 R14: 0000000002fff5c0 R15: ffffbbd44111ff10
FS: 00007efc4335a500(0000) GS:ffff93a5bfc00000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: fffffffffffffffe CR3: 00000000b2a58000 CR4: 00000000001406e0
Call Trace:
kpageflags_read+0xc7/0x120
proc_reg_read+0x3c/0x60
__vfs_read+0x36/0x170
vfs_read+0x89/0x130
ksys_pread64+0x71/0x90
do_syscall_64+0x5b/0x160
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7efc42e75e23
Code: 09 00 ba 9f 01 00 00 e8 ab 81 f4 ff 66 2e 0f 1f 84 00 00 00 00 00 90 83 3d 29 0a 2d 00 00 75 13 49 89 ca b8 11 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 34 c3 48 83 ec 08 e8 db d3 01 00 48 89 04 24
According to kernel bisection, this problem became visible due to commit
f7f99100d8d9 ("mm: stop zeroing memory during allocation in vmemmap")
which changes how struct pages are initialized.
Memblock layout affects the pfn ranges covered by node/zone. Consider
that we have a VM with 2 NUMA nodes and each node has 4GB memory, and the
default (no memmap= given) memblock layout is like below:
MEMBLOCK configuration:
memory size = 0x00000001fff75c00 reserved size = 0x000000000300c000
memory.cnt = 0x4
memory[0x0] [0x0000000000001000-0x000000000009efff], 0x000000000009e000 bytes on node 0 flags: 0x0
memory[0x1] [0x0000000000100000-0x00000000bffd6fff], 0x00000000bfed7000 bytes on node 0 flags: 0x0
memory[0x2] [0x0000000100000000-0x000000013fffffff], 0x0000000040000000 bytes on node 0 flags: 0x0
memory[0x3] [0x0000000140000000-0x000000023fffffff], 0x0000000100000000 bytes on node 1 flags: 0x0
...
If you give memmap=1G!4G (so it just covers memory[0x2]),
the range [0x100000000-0x13fffffff] is gone:
MEMBLOCK configuration:
memory size = 0x00000001bff75c00 reserved size = 0x000000000300c000
memory.cnt = 0x3
memory[0x0] [0x0000000000001000-0x000000000009efff], 0x000000000009e000 bytes on node 0 flags: 0x0
memory[0x1] [0x0000000000100000-0x00000000bffd6fff], 0x00000000bfed7000 bytes on node 0 flags: 0x0
memory[0x2] [0x0000000140000000-0x000000023fffffff], 0x0000000100000000 bytes on node 1 flags: 0x0
...
This shrinks node 0's pfn range, because it is calculated from the
address range of memblock.memory. So some of the struct pages in the gap
range are left uninitialized.
We have a function, zero_resv_unavail(), which zeroes the struct pages
within the reserved-but-unavailable range (i.e. memblock.reserved &&
!memblock.memory). This patch utilizes it to cover all unavailable
ranges by putting them into memblock.reserved.
Link: http://lkml.kernel.org/r/20180615072947.GB23273@hori1.linux.bs1.fc.nec.co.jp
Fixes: f7f99100d8d9 ("mm: stop zeroing memory during allocation in vmemmap")
Signed-off-by: Naoya Horiguchi <n-horiguchi(a)ah.jp.nec.com>
Tested-by: Oscar Salvador <osalvador(a)suse.de>
Tested-by: "Herton R. Krzesinski" <herton(a)redhat.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin(a)oracle.com>
Cc: Steven Sistare <steven.sistare(a)oracle.com>
Cc: Daniel Jordan <daniel.m.jordan(a)oracle.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
diff -puN arch/x86/kernel/e820.c~x86-e820-put-e820_type_ram-regions-into-memblockreserved arch/x86/kernel/e820.c
--- a/arch/x86/kernel/e820.c~x86-e820-put-e820_type_ram-regions-into-memblockreserved
+++ a/arch/x86/kernel/e820.c
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
{
int i;
u64 end;
+ u64 addr = 0;
/*
* The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
struct e820_entry *entry = &e820_table->entries[i];
end = entry->addr + entry->size;
+ if (addr < entry->addr)
+ memblock_reserve(addr, entry->addr - addr);
+ addr = end;
if (end != (resource_size_t)end)
continue;
+ /*
+ * all !E820_TYPE_RAM ranges (including gap ranges) are put
+ * into memblock.reserved to make sure that struct pages in
+ * such regions are not left uninitialized after bootup.
+ */
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
- continue;
-
- memblock_add(entry->addr, entry->size);
+ memblock_reserve(entry->addr, entry->size);
+ else
+ memblock_add(entry->addr, entry->size);
}
/* Throw away partial pages: */
_
Patches currently in -mm which might be from n-horiguchi(a)ah.jp.nec.com are
The patch titled
Subject: slub: fix failure when we delete and create a slab cache
has been removed from the -mm tree. Its filename was
slub-fix-failure-when-we-delete-and-create-a-slab-cache.patch
This patch was dropped because it was merged into mainline or a subsystem tree
------------------------------------------------------
From: Mikulas Patocka <mpatocka(a)redhat.com>
Subject: slub: fix failure when we delete and create a slab cache
In kernel 4.17 I removed some code from dm-bufio that did slab cache
merging (21bb13276768) - both slab and slub support merging caches with
identical attributes, so dm-bufio now just calls kmem_cache_create and
relies on implicit merging.
This uncovered a bug in the slub subsystem - if we delete a cache and
immediately create another cache with the same attributes, it fails
because of duplicate filename in /sys/kernel/slab/. The slub subsystem
offloads freeing the cache to a workqueue - and if we create the new cache
before the workqueue runs, it complains because of duplicate filename in
sysfs.
This patch fixes the bug by moving the call of kobject_del from
sysfs_slab_remove_workfn to shutdown_cache. kobject_del must be called
while we hold slab_mutex - so that the sysfs entry is deleted before a
cache with the same attributes could be created.
Running device-mapper-test-suite with:
dmtest run --suite thin-provisioning -n /commit_failure_causes_fallback/
triggers:
[ 119.618958] Buffer I/O error on dev dm-0, logical block 1572848, async page read
[ 119.686224] device-mapper: thin: 253:1: metadata operation 'dm_pool_alloc_data_block' failed: error = -5
[ 119.695821] device-mapper: thin: 253:1: aborting current metadata transaction
[ 119.703255] sysfs: cannot create duplicate filename '/kernel/slab/:a-0000144'
[ 119.710394] CPU: 2 PID: 1037 Comm: kworker/u48:1 Not tainted 4.17.0.snitm+ #25
[ 119.717608] Hardware name: Supermicro SYS-1029P-WTR/X11DDW-L, BIOS 2.0a 12/06/2017
[ 119.725177] Workqueue: dm-thin do_worker [dm_thin_pool]
[ 119.730401] Call Trace:
[ 119.732856] dump_stack+0x5a/0x73
[ 119.736173] sysfs_warn_dup+0x58/0x70
[ 119.739839] sysfs_create_dir_ns+0x77/0x80
[ 119.743939] kobject_add_internal+0xba/0x2e0
[ 119.748210] kobject_init_and_add+0x70/0xb0
[ 119.752399] ? sysfs_slab_add+0x101/0x250
[ 119.756409] sysfs_slab_add+0xb1/0x250
[ 119.760161] __kmem_cache_create+0x116/0x150
[ 119.764436] ? number+0x2fb/0x340
[ 119.767755] ? _cond_resched+0x15/0x30
[ 119.771508] create_cache+0xd9/0x1f0
[ 119.775085] kmem_cache_create_usercopy+0x1c1/0x250
[ 119.779965] kmem_cache_create+0x18/0x20
[ 119.783894] dm_bufio_client_create+0x1ae/0x410 [dm_bufio]
[ 119.789380] ? dm_block_manager_alloc_callback+0x20/0x20 [dm_persistent_data]
[ 119.796509] ? kmem_cache_alloc_trace+0xae/0x1d0
[ 119.801131] dm_block_manager_create+0x5e/0x90 [dm_persistent_data]
[ 119.807397] __create_persistent_data_objects+0x38/0x940 [dm_thin_pool]
[ 119.814008] dm_pool_abort_metadata+0x64/0x90 [dm_thin_pool]
[ 119.819669] metadata_operation_failed+0x59/0x100 [dm_thin_pool]
[ 119.825673] alloc_data_block.isra.53+0x86/0x180 [dm_thin_pool]
[ 119.831592] process_cell+0x2a3/0x550 [dm_thin_pool]
[ 119.836558] ? mempool_alloc+0x6f/0x180
[ 119.840400] ? u32_swap+0x10/0x10
[ 119.843717] ? sort+0x17b/0x270
[ 119.846863] ? u32_swap+0x10/0x10
[ 119.850181] do_worker+0x28d/0x8f0 [dm_thin_pool]
[ 119.854890] ? move_linked_works+0x6f/0xa0
[ 119.858989] process_one_work+0x171/0x370
[ 119.862999] worker_thread+0x49/0x3f0
[ 119.866669] kthread+0xf8/0x130
[ 119.869813] ? max_active_store+0x80/0x80
[ 119.873827] ? kthread_bind+0x10/0x10
[ 119.877493] ret_from_fork+0x35/0x40
[ 119.881076] kobject_add_internal failed for :a-0000144 with -EEXIST, don't try to register things with the same name in the same directory.
[ 119.893580] kmem_cache_create(dm_bufio_buffer-16) failed with error -17
Link: http://lkml.kernel.org/r/alpine.LRH.2.02.1806151817130.6333@file01.intranet…
Signed-off-by: Mikulas Patocka <mpatocka(a)redhat.com>
Reported-by: Mike Snitzer <snitzer(a)redhat.com>
Tested-by: Mike Snitzer <snitzer(a)redhat.com>
Cc: Christoph Lameter <cl(a)linux.com>
Cc: Pekka Enberg <penberg(a)kernel.org>
Cc: David Rientjes <rientjes(a)google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim(a)lge.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
diff -puN include/linux/slub_def.h~slub-fix-failure-when-we-delete-and-create-a-slab-cache include/linux/slub_def.h
--- a/include/linux/slub_def.h~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
diff -puN mm/slab_common.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache mm/slab_common.c
--- a/mm/slab_common.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/mm/slab_common.c
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_ca
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
+#endif
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
sysfs_slab_release(s);
#else
slab_kmem_cache_release(s);
diff -puN mm/slub.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache mm/slub.c
--- a/mm/slub.c~slub-fix-failure-when-we-delete-and-create-a-slab-cache
+++ a/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(str
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
- kobject_del(&s->kobj);
out:
kobject_put(&s->kobj);
}
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kme
schedule_work(&s->kobj_remove_work);
}
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+ if (slab_state >= FULL)
+ kobject_del(&s->kobj);
+}
+
void sysfs_slab_release(struct kmem_cache *s)
{
if (slab_state >= FULL)
_
Patches currently in -mm which might be from mpatocka(a)redhat.com are
The size of KVM's shadow page tables corresponds to the size of the
guest virtual machines on the system. Large VMs can consume a significant
amount of memory as shadow page tables, which should not be left as
unaccounted system memory overhead. So, account shadow page tables to the
kmemcg.
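For reference, a minimal sketch of what the one-line change relies on
(generic code, not the mmu cache itself; the helper name is made up):
GFP_KERNEL_ACCOUNT is GFP_KERNEL with __GFP_ACCOUNT set, so the allocation
is charged to the calling task's memory cgroup.

#include <linux/gfp.h>

/* Hypothetical helper: pages allocated this way are accounted to kmemcg. */
static struct page *alloc_accounted_page(void)
{
	/* GFP_KERNEL_ACCOUNT == GFP_KERNEL | __GFP_ACCOUNT */
	return alloc_page(GFP_KERNEL_ACCOUNT);
}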
Signed-off-by: Shakeel Butt <shakeelb(a)google.com>
Cc: Michal Hocko <mhocko(a)kernel.org>
Cc: Johannes Weiner <hannes(a)cmpxchg.org>
Cc: Vladimir Davydov <vdavydov.dev(a)gmail.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Cc: Greg Thelen <gthelen(a)google.com>
Cc: Radim Krčmář <rkrcmar(a)redhat.com>
Cc: Peter Feiner <pfeiner(a)google.com>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Cc: stable(a)vger.kernel.org
---
Changelog since v1:
- replaced (GFP_KERNEL|__GFP_ACCOUNT) with GFP_KERNEL_ACCOUNT
arch/x86/kvm/mmu.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d594690d8b95..6b8f11521c41 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- page = (void *)__get_free_page(GFP_KERNEL);
+ page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;
--
2.18.0.rc2.346.g013aa6912e-goog
This patch set is based on the mmc.git / fixes branch.
Yoshihiro Shimoda (2):
mmc: renesas_sdhi_internal_dmac: Fix missing unmap in error path
mmc: renesas_sdhi_internal_dmac: Cannot clear the RX_IN_USE in abort
drivers/mmc/host/renesas_sdhi_internal_dmac.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
--
1.9.1
This was supposed to be a mask of all known rings, but it is being used
by execbuffer to filter out invalid rings, and so is instead mapping high
unused values onto valid rings. Instead of a mask of all known rings,
we need it to be the mask of all possible rings.
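To make the failure mode concrete (a standalone illustration, not driver
code; the constant names are local to the sketch): with the old 3-bit mask,
an out-of-range ring selector aliases onto a valid ring instead of being
rejected.

#include <stdio.h>

#define OLD_I915_EXEC_RING_MASK (7 << 0)
#define NEW_I915_EXEC_RING_MASK (0x3f)

int main(void)
{
	unsigned int flags = 9;	/* bogus ring selector passed in by userspace */

	/* Old mask: 9 & 7 == 1, which silently aliases onto the render ring. */
	printf("old mask maps it to ring %u\n", flags & OLD_I915_EXEC_RING_MASK);

	/* New mask: 9 & 0x3f == 9, an out-of-range value validation can reject. */
	printf("new mask sees ring %u\n", flags & NEW_I915_EXEC_RING_MASK);
	return 0;
}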
Fixes: 549f7365820a ("drm/i915: Enable SandyBridge blitter ring")
Fixes: de1add360522 ("drm/i915: Decouple execbuf uAPI from internal implementation")
Signed-off-by: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v4.6+
---
include/uapi/drm/i915_drm.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index f50065413bfa..b90d31ba913b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -962,7 +962,7 @@ struct drm_i915_gem_execbuffer2 {
* struct drm_i915_gem_exec_fence *fences.
*/
__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
--
2.18.0
From: Ping-Ke Shih <pkshih(a)realtek.com>
Without this patch, the firmware will not run properly on rtl8821ae, which
causes a bad user experience: for example, poor connection performance at
low rates, higher power consumption, and so on.
rtl8821ae uses two kinds of firmware, for the normal and WoWLAN cases, and
each firmware has its own data buffer and size. The original code always
overwrote the size of the normal firmware, rtlpriv->rtlhal.fwsize, and this
mismatch caused a firmware checksum error, so the firmware couldn't start.
In this situation, the driver gives the message "Firmware is not ready to
run!".
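To restate the data layout (a simplified sketch of the relevant rtl_hal
fields, not the real struct): each image keeps its own buffer/size pair, so
the load callback must only update the pair for the image it just copied.

#include <linux/types.h>

/* Simplified sketch of the relevant fields (not the full struct rtl_hal). */
struct fw_pair_sketch {
	u8  *pfirmware;		/* normal firmware image    */
	u32  fwsize;		/* size of the normal image */
	u8  *wowlan_firmware;	/* WoWLAN firmware image    */
	u32  wowlan_fwsize;	/* size of the WoWLAN image */
};

/*
 * The bug: after filling wowlan_firmware/wowlan_fwsize, the old code also
 * overwrote fwsize, so it no longer matched pfirmware and the normal
 * firmware failed its checksum check.
 */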
Fixes: fe89707f0afa ("rtlwifi: rtl8821ae: Simplify loading of WOWLAN firmware")
Signed-off-by: Ping-Ke Shih <pkshih(a)realtek.com>
Cc: Stable <stable(a)vger.kernel.org> # 4.0+
Reviewed-by: Larry Finger <Larry.Finger(a)lwfinger.net>
---
V3: Add more commit logs, Cc and Reviewed-by. Thank you, Larry.
v2: fix commit log typo.
---
drivers/net/wireless/realtek/rtlwifi/core.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a3f46203ee7a..4bf7967590ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -130,7 +130,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
firmware->size);
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
- rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
--
2.15.1
This patch series includes some improvements to the machine check handler
for pseries. Patch 1 fixes an issue where the machine check handler crashes
the kernel while accessing a vmalloc-ed buffer while in NMI context.
Patch 2 fixes an endian bug when restoring r3 in the MCE handler.
Patch 4 implements a real-mode MCE handler and flushes the SLBs on SLB errors.
Patch 5 displays the MCE error details on the console.
Patch 6 saves and dumps the SLB contents on SLB MCE errors to improve
debuggability.
Change in V4:
- Flush the SLBs in real mode mce handler to handle SLB errors for entry 0.
- Allocate buffers per cpu to hold rtas error log and old slb contents.
- Defer the logging of rtas error log to irq work queue.
Change in V3:
- Moved patch 5 to patch 2
Change in V2:
- patch 3: Display additional info (NIP and task info) in MCE error details.
- patch 5: Fix endian bug when restoring r3 in MCE handler.
---
Mahesh Salgaonkar (6):
powerpc/pseries: Defer the logging of rtas error to irq work queue.
powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
powerpc/pseries: Define MCE error event section.
powerpc/pseries: flush SLB contents on SLB MCE errors.
powerpc/pseries: Display machine check error details.
powerpc/pseries: Dump the SLB contents on SLB MCE errors.
arch/powerpc/include/asm/book3s/64/mmu-hash.h | 8 +
arch/powerpc/include/asm/machdep.h | 1
arch/powerpc/include/asm/paca.h | 4
arch/powerpc/include/asm/rtas.h | 116 ++++++++++++
arch/powerpc/kernel/exceptions-64s.S | 42 ++++
arch/powerpc/kernel/mce.c | 16 +-
arch/powerpc/mm/slb.c | 63 +++++++
arch/powerpc/platforms/powernv/opal.c | 1
arch/powerpc/platforms/pseries/pseries.h | 1
arch/powerpc/platforms/pseries/ras.c | 236 +++++++++++++++++++++++--
arch/powerpc/platforms/pseries/setup.c | 27 +++
11 files changed, 497 insertions(+), 18 deletions(-)
--
Signature
QUEUE_FLAG_DAX is an indication that a given block device supports
filesystem DAX and should not be set for PMEM namespaces which are in "raw"
or "sector" modes. These namespaces lack struct page and are prevented
from participating in filesystem DAX.
Signed-off-by: Ross Zwisler <ross.zwisler(a)linux.intel.com>
Suggested-by: Mike Snitzer <snitzer(a)redhat.com>
Cc: stable(a)vger.kernel.org
---
drivers/nvdimm/pmem.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 68940356cad3..8b1fd7f1a224 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ if (pmem->pfn_flags & PFN_MAP)
+ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
--
2.14.4
The other day I was testing one of the HP laptops at my office with an
i915/amdgpu hybrid setup and noticed that hotplugging was non-functional
on almost all of the display outputs. I eventually discovered that all
of the external outputs were connected to the amdgpu device instead of
i915, and that the hotplugs weren't being detected so long as the GPU
was in runtime suspend. After some talking with folks at AMD, I learned
that amdgpu is actually supposed to support hotplug detection in runtime
suspend so long as the OEM has implemented it properly in the firmware.
On this HP ZBook 15 G4 (the machine in question), amdgpu wasn't managing
to find the ATIF handle at all, despite the fact that I could see ACPI
events being sent in response to any hotplugging. After going through
dumps of the firmware, I discovered that this machine did in fact
support ATIF, but that its ATIF method lived in an entirely different
namespace than the device's handle (the device handle was
\_SB_.PCI0.PEG0.PEGP, but ATIF lives under ATPX's handle at
\_SB_.PCI0.GFX0).
According to AMD, the reason for this appears to be that on laptops with
PRIME configurations, the ATIF and other related ACPI handles usually
live under the namespace of the intel GPU instead of the AMD GPU.
Machines that don't have more then a single GPU have the ATIF handle
located in the namespace of the AMD GPU.
So, fix this by probing ATPX's ACPI parent's namespace if we can't find
ATIF elsewhere, along with storing a pointer to the proper handle to use
for ATIF and using that instead of the device's handle.
This fixes HPD detection while in runtime suspend for this ZBook!
Signed-off-by: Lyude Paul <lyude(a)redhat.com>
Cc: stable(a)vger.kernel.org
---
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 80 +++++++++++++++++-------
1 file changed, 59 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 22c7e8ec0b9a..b0d7978c475d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -65,6 +65,8 @@ struct amdgpu_atif_functions {
};
struct amdgpu_atif {
+ acpi_handle handle;
+
struct amdgpu_atif_notifications notifications;
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
@@ -83,8 +85,9 @@ struct amdgpu_atif {
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
- struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ int function,
+ struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
@@ -107,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
atif_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -178,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atif_verify_interface(acpi_handle handle,
- struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -213,6 +216,36 @@ static int amdgpu_atif_verify_interface(acpi_handle handle,
return err;
}
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+ acpi_handle handle = NULL;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_status status;
+
+ /* First check the device handle */
+ status = acpi_get_handle(dhandle, "ATIF", &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+
+ /* Some vendors stick the ATIF methods in the same namespace as the
+ * ATPX methods
+ */
+ if (amdgpu_has_atpx()) {
+ status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+ &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("No ATIF handle found\n");
+ return NULL;
+out:
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ return handle;
+}
+
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -225,15 +258,16 @@ static int amdgpu_atif_verify_interface(acpi_handle handle,
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
- struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
+ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+ NULL);
if (!info) {
err = -EIO;
goto out;
@@ -287,14 +321,15 @@ static int amdgpu_atif_get_notification_params(acpi_handle handle,
* (all asics).
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
- struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+ NULL);
if (!info)
return -EIO;
@@ -327,11 +362,10 @@ static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
* Returns NOTIFY code
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
- struct acpi_bus_event *event)
+ struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = adev->atif;
struct atif_sbios_requests req;
- acpi_handle handle;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -347,8 +381,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- count = amdgpu_atif_get_sbios_requests(handle, &req);
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
if (count <= 0)
return NOTIFY_DONE;
@@ -679,7 +712,7 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle;
+ acpi_handle handle, atif_handle;
struct amdgpu_atif *atif;
struct amdgpu_atcs *atcs = &adev->atcs;
int ret;
@@ -696,14 +729,20 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
}
- /* Call the ATIF method */
+ /* Probe for ATIF, and initialize it if found */
+ atif_handle = amdgpu_atif_probe_handle(handle);
+ if (!atif_handle)
+ goto out;
+
atif = kzalloc(sizeof(*atif), GFP_KERNEL);
if (!atif) {
DRM_WARN("Not enough memory to initialize ATIF\n");
goto out;
}
+ atif->handle = atif_handle;
- ret = amdgpu_atif_verify_interface(handle, atif);
+ /* Call the ATIF method */
+ ret = amdgpu_atif_verify_interface(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
kfree(atif);
@@ -739,8 +778,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
if (atif->functions.system_params) {
- ret = amdgpu_atif_get_notification_params(handle,
- &atif->notification_cfg);
+ ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
--
2.17.1
The handles for ACPI methods like ATPX and ATIF will live under the
integrated GPU's namespace on systems with more than one GPU. ATPX is
already detected regardless of the namespace it lives in, and it will
always be in the same namespace as ATIF. So we can make things easier on
ourselves by just adding some functions to retrieve said namespace so it
can be searched later for ATIF.
Signed-off-by: Lyude Paul <lyude(a)redhat.com>
Cc: stable(a)vger.kernel.org
---
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 6 ++++++
2 files changed, 12 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7df5d3d11aff..7dcbac8af9a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1858,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
/*
* KMS
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a3896412a019..b33f1680c9a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+ return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
/**
* amdgpu_atpx_call - call an ATPX method
*
--
2.17.1
Until the original patch that adds the set_memory_block_size_order()
function is applied, the following two patches should not be applied
either. In fact, I'm surprised it built with that function undefined.
Thanks,
Mike
On 6/27/2018 7:08 PM, gregkh(a)linuxfoundation.org wrote:
>
> This is a note to let you know that I've just added the patch titled
>
> x86/platform/UV: Use new set memory block size function
>
> to the 4.14-stable tree which can be found at:
> http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
>
> The filename of the patch is:
> x86-platform-uv-use-new-set-memory-block-size-function.patch
> and it can be found in the queue-4.14 subdirectory.
>
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
>
>
> From bbbd2b51a2aa0d76b3676271e216cf3647773397 Mon Sep 17 00:00:00 2001
> From: "mike.travis(a)hpe.com" <mike.travis(a)hpe.com>
> Date: Thu, 24 May 2018 15:17:13 -0500
> Subject: x86/platform/UV: Use new set memory block size function
>
> From: mike.travis(a)hpe.com <mike.travis(a)hpe.com>
>
> commit bbbd2b51a2aa0d76b3676271e216cf3647773397 upstream.
>
> Add a call to the new function to "adjust" the current fixed UV memory
> block size of 2GB so it can be changed to a different physical boundary.
> This accommodates changes in the Intel BIOS, and therefore UV BIOS,
> which now can align boundaries different than the previous UV standard
> of 2GB. It also flags any UV Global Address boundaries from BIOS that
> cause a change in the mem block size (boundary).
>
> The current boundary of 2GB has been used on UV since the first system
> release in 2009 with Linux 2.6 and has worked fine. But the new NVDIMM
> persistent memory modules (PMEM), along with the Intel BIOS changes to
> support these modules caused the memory block size boundary to be set
> to a lower limit. Intel only guarantees that this minimum boundary at
> 64MB though the current Linux limit is 128MB.
>
> Note that the default remains 2GB if no changes occur.
>
> Signed-off-by: Mike Travis <mike.travis(a)hpe.com>
> Reviewed-by: Andrew Banman <andrew.banman(a)hpe.com>
> Cc: Andrew Morton <akpm(a)linux-foundation.org>
> Cc: Dimitri Sivanich <dimitri.sivanich(a)hpe.com>
> Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
> Cc: Peter Zijlstra <peterz(a)infradead.org>
> Cc: Russ Anderson <russ.anderson(a)hpe.com>
> Cc: Thomas Gleixner <tglx(a)linutronix.de>
> Cc: dan.j.williams(a)intel.com
> Cc: jgross(a)suse.com
> Cc: kirill.shutemov(a)linux.intel.com
> Cc: mhocko(a)suse.com
> Cc: stable(a)vger.kernel.org
> Link: https://lkml.kernel.org/lkml/20180524201711.732785782@stormcage.americas.sg…
> Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
> Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
>
> ---
> arch/x86/kernel/apic/x2apic_uv_x.c | 49 ++++++++++++++++++++++++++++++++++---
> 1 file changed, 46 insertions(+), 3 deletions(-)
>
> --- a/arch/x86/kernel/apic/x2apic_uv_x.c
> +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
> @@ -26,6 +26,7 @@
> #include <linux/delay.h>
> #include <linux/crash_dump.h>
> #include <linux/reboot.h>
> +#include <linux/memory.h>
>
> #include <asm/uv/uv_mmrs.h>
> #include <asm/uv/uv_hub.h>
> @@ -346,6 +347,40 @@ extern int uv_hub_info_version(void)
> }
> EXPORT_SYMBOL(uv_hub_info_version);
>
> +/* Default UV memory block size is 2GB */
> +static unsigned long mem_block_size = (2UL << 30);
> +
> +static __init int adj_blksize(u32 lgre)
> +{
> + unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
> + unsigned long size;
> +
> + for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
> + if (IS_ALIGNED(base, size))
> + break;
> +
> + if (size >= mem_block_size)
> + return 0;
> +
> + mem_block_size = size;
> + return 1;
> +}
> +
> +static __init void set_block_size(void)
> +{
> + unsigned int order = ffs(mem_block_size);
> +
> + if (order) {
> + /* adjust for ffs return of 1..64 */
> + set_memory_block_size_order(order - 1);
> + pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
> + } else {
> + /* bad or zero value, default to 1UL << 31 (2GB) */
> + pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
> + set_memory_block_size_order(31);
> + }
> +}
> +
> /* Build GAM range lookup table: */
> static __init void build_uv_gr_table(void)
> {
> @@ -1144,23 +1179,30 @@ static void __init decode_gam_rng_tbl(un
> << UV_GAM_RANGE_SHFT);
> int order = 0;
> char suffix[] = " KMGTPE";
> + int flag = ' ';
>
> while (size > 9999 && order < sizeof(suffix)) {
> size /= 1024;
> order++;
> }
>
> + /* adjust max block size to current range start */
> + if (gre->type == 1 || gre->type == 2)
> + if (adj_blksize(lgre))
> + flag = '*';
> +
> if (!index) {
> pr_info("UV: GAM Range Table...\n");
> - pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
> + pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
> }
> - pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
> + pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
> index++,
> (unsigned long)lgre << UV_GAM_RANGE_SHFT,
> (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
> - size, suffix[order],
> + flag, size, suffix[order],
> gre->type, gre->nasid, gre->sockid, gre->pnode);
>
> + /* update to next range start */
> lgre = gre->limit;
> if (sock_min > gre->sockid)
> sock_min = gre->sockid;
> @@ -1391,6 +1433,7 @@ static void __init uv_system_init_hub(vo
>
> build_socket_tables();
> build_uv_gr_table();
> + set_block_size();
> uv_init_hub_info(&hub_info);
> uv_possible_blades = num_possible_nodes();
> if (!_node_to_pnode)
>
>
> Patches currently in stable-queue which might be from mike.travis(a)hpe.com are
>
> queue-4.14/x86-platform-uv-add-kernel-parameter-to-set-memory-block-size.patch
> queue-4.14/x86-platform-uv-use-new-set-memory-block-size-function.patch
>
This is a note to let you know that I've just added the patch titled
serdev: fix memleak on module unload
to my tty git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
in the tty-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
>From bc6cf3669d22371f573ab0305b3abf13887c0786 Mon Sep 17 00:00:00 2001
From: Johan Hovold <johan(a)kernel.org>
Date: Wed, 13 Jun 2018 17:08:59 +0200
Subject: serdev: fix memleak on module unload
Make sure to free all resources associated with the ida on module
exit.
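As a generic reminder of the pattern being fixed (a hypothetical module,
not the serdev code): an ida used for numbering should be torn down with
ida_destroy() in the module exit path, otherwise its cached bitmap blocks
leak on unload.

#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>

static DEFINE_IDA(foo_ida);	/* stands in for serdev's ctrl_ida */

static int __init foo_init(void)
{
	return 0;
}

static void __exit foo_exit(void)
{
	/* Free any bitmap blocks still held by the ida. */
	ida_destroy(&foo_ida);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");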
Fixes: cd6484e1830b ("serdev: Introduce new bus for serial attached devices")
Cc: stable <stable(a)vger.kernel.org> # 4.11
Signed-off-by: Johan Hovold <johan(a)kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/tty/serdev/core.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index df93b727e984..9e59f4788589 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
static void __exit serdev_exit(void)
{
bus_unregister(&serdev_bus_type);
+ ida_destroy(&ctrl_ida);
}
module_exit(serdev_exit);
--
2.18.0
This is a note to let you know that I've just added the patch titled
vt: prevent leaking uninitialized data to userspace via /dev/vcs*
to my tty git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
in the tty-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From 21eff69aaaa0e766ca0ce445b477698dc6a9f55a Mon Sep 17 00:00:00 2001
From: Alexander Potapenko <glider(a)google.com>
Date: Thu, 14 Jun 2018 12:23:09 +0200
Subject: vt: prevent leaking uninitialized data to userspace via /dev/vcs*
KMSAN reported an infoleak when reading from /dev/vcs*:
BUG: KMSAN: kernel-infoleak in vcs_read+0x18ba/0x1cc0
Call Trace:
...
kmsan_copy_to_user+0x7a/0x160 mm/kmsan/kmsan.c:1253
copy_to_user ./include/linux/uaccess.h:184
vcs_read+0x18ba/0x1cc0 drivers/tty/vt/vc_screen.c:352
__vfs_read+0x1b2/0x9d0 fs/read_write.c:416
vfs_read+0x36c/0x6b0 fs/read_write.c:452
...
Uninit was created at:
kmsan_save_stack_with_flags mm/kmsan/kmsan.c:279
kmsan_internal_poison_shadow+0xb8/0x1b0 mm/kmsan/kmsan.c:189
kmsan_kmalloc+0x94/0x100 mm/kmsan/kmsan.c:315
__kmalloc+0x13a/0x350 mm/slub.c:3818
kmalloc ./include/linux/slab.h:517
vc_allocate+0x438/0x800 drivers/tty/vt/vt.c:787
con_install+0x8c/0x640 drivers/tty/vt/vt.c:2880
tty_driver_install_tty drivers/tty/tty_io.c:1224
tty_init_dev+0x1b5/0x1020 drivers/tty/tty_io.c:1324
tty_open_by_driver drivers/tty/tty_io.c:1959
tty_open+0x17b4/0x2ed0 drivers/tty/tty_io.c:2007
chrdev_open+0xc25/0xd90 fs/char_dev.c:417
do_dentry_open+0xccc/0x1440 fs/open.c:794
vfs_open+0x1b6/0x2f0 fs/open.c:908
...
Bytes 0-79 of 240 are uninitialized
Consistently allocating |vc_screenbuf| with kzalloc() fixes the problem.
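As a userspace analogy (a standalone sketch, not kernel code): a buffer
that is only partially written exposes stale heap contents unless it was
zero-initialized up front, which is what the kmalloc() -> kzalloc() switch
guarantees here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 240;
	unsigned char *dirty = malloc(size);	/* kmalloc()-like: contents indeterminate */
	unsigned char *clean = calloc(1, size);	/* kzalloc()-like: zero-filled */

	if (!dirty || !clean)
		return 1;

	/* Only bytes 80..239 are ever written, as in the reported leak. */
	memset(dirty + 80, 'A', size - 80);
	memset(clean + 80, 'A', size - 80);

	/* dirty[0..79] may hold whatever was on the heap; clean[0..79] is 0. */
	printf("dirty[0]=%02x clean[0]=%02x\n", dirty[0], clean[0]);

	free(dirty);
	free(clean);
	return 0;
}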
Reported-by: syzbot+17a8efdf800000(a)syzkaller.appspotmail.com
Signed-off-by: Alexander Potapenko <glider(a)google.com>
Cc: stable <stable(a)vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/tty/vt/vt.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1eb1a376a041..15eb6c829d39 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_screen_size > (4 << 20))
return -EINVAL;
- newscreen = kmalloc(new_screen_size, GFP_USER);
+ newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
--
2.18.0