The syscall number may have been changed by a tracer, so we should pass
the actual number in from the caller instead of pulling it from the
saved r7 value directly.
Cc: <stable@vger.kernel.org>
Cc: Dave Martin <Dave.Martin@arm.com>
Cc: Pi-Hsun Shih <pihsun@chromium.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
---
arch/arm64/kernel/sys_compat.c | 9 ++++-----
arch/arm64/kernel/syscall.c | 9 ++++-----
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 5972b7533fa0..54c29cd38ff9 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
/*
* Handle all unrecognised system calls.
*/
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
{
- unsigned int no = regs->regs[7];
void __user *addr;
- switch (no) {
+ switch (scno) {
/*
* Flush a region from virtual address 'r0' to virtual address 'r1'
* _exclusive_. There is no alignment requirement on either address;
@@ -107,7 +106,7 @@ long compat_arm_syscall(struct pt_regs *regs)
* way the calling program can gracefully determine whether
* a feature is supported.
*/
- if (no < __ARM_NR_compat_syscall_end)
+ if (scno < __ARM_NR_compat_syscall_end)
return -ENOSYS;
break;
}
@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
(compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs,
- SIGILL, ILL_ILLTRP, addr, no);
+ SIGILL, ILL_ILLTRP, addr, scno);
return 0;
}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d22312881..5610ac01c1ec 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -13,16 +13,15 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
long ret;
if (is_compat_task()) {
- ret = compat_arm_syscall(regs);
+ ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS)
return ret;
}
@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
ret = __invoke_syscall(regs, syscall_fn);
} else {
- ret = do_ni_syscall(regs);
+ ret = do_ni_syscall(regs, scno);
}
regs->regs[0] = ret;
--
2.1.4
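To illustrate the scenario the commit message describes (a tracer rewriting the syscall number without touching the tracee's saved r7), here is a minimal user-space sketch. It assumes an arm64 kernel exposing the NT_ARM_SYSTEM_CALL regset, glibc's <elf.h> providing that constant, and a tracee already ptrace-stopped at syscall entry; it is an illustration, not part of the patch.

/*
 * Sketch only: a tracer rewriting the pending syscall number of a
 * ptrace-stopped tracee on arm64. The regset write updates the kernel's
 * tracked syscall number but leaves the tracee's r7/w8 registers
 * untouched, which is why compat_arm_syscall() must be given the number
 * by its caller instead of reading regs->regs[7].
 */
#include <elf.h>          /* NT_ARM_SYSTEM_CALL */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>      /* struct iovec */

static int set_syscall_nr(pid_t pid, int nr)
{
        struct iovec iov = {
                .iov_base = &nr,
                .iov_len  = sizeof(nr),
        };

        if (ptrace(PTRACE_SETREGSET, pid, (void *)NT_ARM_SYSTEM_CALL, &iov) == -1) {
                perror("PTRACE_SETREGSET(NT_ARM_SYSTEM_CALL)");
                return -1;
        }
        return 0;
}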
The CAN frame modification rules allow bitwise logical operations which
can also be applied to the can_dlc field. Ensure the manipulation result
stays within the can_dlc boundaries so that CAN drivers do not
accidentally write arbitrary content beyond the data registers in the
CAN controller's I/O memory when processing can-gw manipulated outgoing
frames. When passing these frames to user space this issue had no effect
on the kernel and leaked no data, as we always strictly copy
sizeof(struct can_frame) bytes.
Reported-by: Muyu Yu <ieatmuttonchuan@gmail.com>
Reported-by: Marcus Meissner <meissner@suse.de>
Tested-by: Muyu Yu <ieatmuttonchuan@gmail.com>
Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
Cc: linux-stable <stable@vger.kernel.org> # >= v3.2
---
net/can/gw.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da88a127..9000d9b8a133 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -418,6 +418,10 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
/* check for checksum updates when the CAN frame has been modified */
if (modidx) {
+ /* ensure DLC boundaries after the different mods */
+ if (cf->can_dlc > 8)
+ cf->can_dlc = 8;
+
if (gwj->mod.csumfunc.crc8)
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
--
2.19.2
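To make the hazard concrete, the small user-space sketch below (assuming only the struct can_frame layout from <linux/can.h>; it is not the can-gw code itself) shows how a bitwise OR modification can push can_dlc past the 8 data bytes a classic CAN frame carries, and the clamp the patch adds.

/*
 * Sketch of a can-gw style OR modification applied to a routed frame.
 * Without the clamp, cf->can_dlc can end up greater than 8, and a CAN
 * driver that copies cf->can_dlc bytes would access memory beyond
 * cf->data[8] (or beyond its controller's data registers).
 */
#include <linux/can.h>

static void or_mod_example(struct can_frame *cf, const struct can_frame *mod)
{
        cf->can_id  |= mod->can_id;
        cf->can_dlc |= mod->can_dlc;    /* e.g. 8 | 0x0f = 15 > CAN_MAX_DLEN */

        /* ensure DLC boundaries after the different mods (as in the patch) */
        if (cf->can_dlc > 8)
                cf->can_dlc = 8;
}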
From: Stanley Chu <stanley.chu@mediatek.com>
The commit 356fd2663cff ("scsi: Set request queue runtime PM status
back to active on resume") fixed up the inconsistent RPM status between
the request queue and the device. However, changing the request queue
RPM status shall be done only on a successful resume; otherwise the
status may still be inconsistent, as below:
Request queue: RPM_ACTIVE
Device: RPM_SUSPENDED
This ends up in a soft lockup because requests can be submitted to the
underlying devices while those devices and their required resources are
not resumed.
For example, after the above inconsistent status happens, an IO request
can be submitted to the UFS device driver, but the required resources
(like clocks) are not resumed yet, which leads to the warning below in
this call stack:
WARN_ON(hba->clk_gating.state != CLKS_ON);
ufshcd_queuecommand
scsi_dispatch_cmd
scsi_request_fn
__blk_run_queue
cfq_insert_request
__elv_add_request
blk_flush_plug_list
blk_finish_plug
jbd2_journal_commit_transaction
kjournald2
We may then see all pending IO requests hang because there is no
response from the storage host or device, and a soft lockup happens in
the system. In the end, the system may crash in many ways.
Fixes: 356fd2663cff ("scsi: Set request queue runtime PM status back to active on resume")
Cc: stable@vger.kernel.org
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
---
drivers/scsi/scsi_pm.c | 26 +++++++++++++++-----------
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
if (err == 0) {
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ /*
+ * Forcibly set runtime PM status of request queue to "active"
+ * to make sure we can again get requests from the queue
+ * (see also blk_pm_peek_request()).
+ *
+ * The resume hook will correct runtime PM status of the disk.
+ */
+ if (!err && scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->request_queue->dev)
+ blk_set_runtime_active(sdev->request_queue);
+ }
}
return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
else
fn = NULL;
- /*
- * Forcibly set runtime PM status of request queue to "active" to
- * make sure we can again get requests from the queue (see also
- * blk_pm_peek_request()).
- *
- * The resume hook will correct runtime PM status of the disk.
- */
- if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
- blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
if (fn) {
async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
--
2.18.0
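For reference, here is a condensed sketch of the resume ordering this patch establishes, using the same kernel helpers as the diff above; it is an illustration of the flow, not a drop-in replacement for scsi_dev_type_resume().

#include <linux/blkdev.h>
#include <linux/pm_runtime.h>
#include <scsi/scsi_device.h>

static int resume_ordering_sketch(struct device *dev,
                                  int (*cb)(struct device *))
{
        int err = cb ? cb(dev) : 0;             /* driver resume callback */

        if (err)
                return err;                     /* device did not resume */

        pm_runtime_disable(dev);
        err = pm_runtime_set_active(dev);       /* can fail, e.g. when the parent is suspended */
        pm_runtime_enable(dev);

        /*
         * Mark the request queue active only if the device really is
         * RPM_ACTIVE now; q->dev is non-NULL only when runtime PM was
         * set up for the queue via blk_pm_runtime_init().
         */
        if (!err && scsi_is_sdev_device(dev)) {
                struct scsi_device *sdev = to_scsi_device(dev);

                if (sdev->request_queue->dev)
                        blk_set_runtime_active(sdev->request_queue);
        }
        return err;
}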
From: Eric Biggers <ebiggers@google.com>
The memcpy() in crypto_cfb_decrypt_inplace() uses walk->iv as both the
source and destination, which has undefined behavior. It is unneeded
because walk->iv is already used to hold the previous ciphertext block;
thus, walk->iv is already updated to its final value. So, remove it.
Also, note that in-place decryption is the only case where the previous
ciphertext block is not directly available. Therefore, as a related
cleanup I also updated crypto_cfb_encrypt_segment() to directly use the
previous ciphertext block rather than save it into walk->iv. This makes
it consistent with in-place encryption and out-of-place decryption; now
only in-place decryption is different, because it has to be.
Fixes: a7d85e06ed80 ("crypto: cfb - add support for Cipher FeedBack mode")
Cc: <stable@vger.kernel.org> # v4.17+
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/cfb.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 183e8a9c33128..4abfe32ff8451 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -77,12 +77,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk,
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
crypto_xor(dst, src, bsize);
- memcpy(iv, dst, bsize);
+ iv = dst;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
+ memcpy(walk->iv, iv, bsize);
+
return nbytes;
}
@@ -162,7 +164,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
const unsigned int bsize = crypto_cfb_bsize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE];
do {
@@ -172,8 +174,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
--
2.20.1
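To see why walk->iv already holds its final value at the end of in-place decryption, here is a toy, self-contained model of the CFB recurrence (the block "cipher" is a placeholder, not the kernel implementation): the chaining value is overwritten with the current ciphertext block on every iteration, so after the loop it already equals the last ciphertext block and no trailing copy is needed.

#include <stdint.h>
#include <string.h>

#define BS 16

static void toy_encrypt_block(uint8_t out[BS], const uint8_t in[BS])
{
        for (int i = 0; i < BS; i++)
                out[i] = in[i] ^ 0xa5;          /* stand-in for E_K() */
}

/* In-place CFB decryption: P_n = C_n ^ E_K(C_{n-1}), with C_0 = IV. */
static void cfb_decrypt_inplace(uint8_t *buf, size_t nblocks, uint8_t iv[BS])
{
        uint8_t ks[BS], tmp[BS];

        for (size_t n = 0; n < nblocks; n++, buf += BS) {
                memcpy(tmp, buf, BS);           /* save ciphertext C_n */
                toy_encrypt_block(ks, iv);      /* ks = E_K(C_{n-1}) */
                for (int i = 0; i < BS; i++)
                        buf[i] ^= ks[i];        /* buf now holds P_n */
                memcpy(iv, tmp, BS);            /* chaining value := C_n */
        }
        /* iv[] already contains the last ciphertext block here. */
}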
From: Eric Biggers <ebiggers@google.com>
Like some other block cipher mode implementations, the CFB
implementation assumes that while walking through the scatterlist, a
partial block does not occur until the end. But the walk is incorrectly
being done with a blocksize of 1, as 'cra_blocksize' is set to 1 (since
CFB is a stream cipher) but no 'chunksize' is set. This bug causes
incorrect encryption/decryption for some scatterlist layouts.
Fix it by setting the 'chunksize'. Also extend the CFB test vectors to
cover this bug as well as cases where the message length is not a
multiple of the block size.
Fixes: a7d85e06ed80 ("crypto: cfb - add support for Cipher FeedBack mode")
Cc: <stable@vger.kernel.org> # v4.17+
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/cfb.c | 6 ++++++
crypto/testmgr.h | 25 +++++++++++++++++++++++++
2 files changed, 31 insertions(+)
diff --git a/crypto/cfb.c b/crypto/cfb.c
index e81e456734985..183e8a9c33128 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -298,6 +298,12 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
+ /*
+ * To simplify the implementation, configure the skcipher walk to only
+ * give a partial block at the very end, never earlier.
+ */
+ inst->alg.chunksize = alg->cra_blocksize;
+
inst->alg.ivsize = alg->cra_blocksize;
inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index e8f47d7b92cdd..7f4dae7a57a1c 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -12870,6 +12870,31 @@ static const struct cipher_testvec aes_cfb_tv_template[] = {
"\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
"\x20\x31\x62\x3d\x55\xb1\xe4\x71",
.len = 64,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 31, 33 },
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
},
};
--
2.20.1
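For the new test vector, the np/tap fields describe how testmgr splits the message across scatterlist segments; assuming that semantics, the sketch below shows why splitting the 64-byte message into 31 + 33 bytes exercises the bug: the boundary at byte 31 falls inside the second 16-byte AES block, i.e. a partial block occurs before the end of the walk.

#include <stdio.h>

int main(void)
{
        const unsigned int bsize = 16;                  /* AES block size */
        const unsigned int tap[] = { 31, 33 };          /* segment sizes from the test vector */
        unsigned int off = 0;

        /* Check every internal segment boundary against the block grid. */
        for (unsigned int i = 0; i + 1 < sizeof(tap) / sizeof(tap[0]); i++) {
                off += tap[i];
                printf("boundary at byte %u: %s\n", off,
                       off % bsize ? "splits a cipher block" : "block aligned");
        }
        return 0;
}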
From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
[Why]
If the "max bpc" isn't explicitly set in the atomic state then it has a
value of 0. This has the correct behavior of limiting a panel to 8bpc
in the case where the panel supports 8bpc. In the case of eDP panels
this isn't a true assumption - there are panels that can only do 6bpc.
Banding occurs for these displays.
[How]
Initialize max_bpc to 8 when the connector resets. Also carry over the
value when the state is duplicated.
Bugzilla: https://bugs.freedesktop.org/108825
Fixes: 307638884f72 ("drm/amd/display: Support amdgpu "max bpc" connector property")
Backport of commit 49f1c44b581b08e3289127ffe58bd208c3166701 upstream.
Fixes a regression caused by the automatic patch selection of these two patches:
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/dri…
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/dri…
Bug: https://bugs.freedesktop.org/show_bug.cgi?id=109122
Cc: Sasha Levin <sashal@kernel.org>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 49f1c44b581b08e3289127ffe58bd208c3166701)
Cc: <stable@vger.kernel.org> # 4.19.x
---
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 299def84e69c..d792735f1365 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2894,6 +2894,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -2911,6 +2912,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
if (new_state) {
__drm_atomic_helper_connector_duplicate_state(connector,
&new_state->base);
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
--
2.20.1