This is a note to let you know that I've just added the patch titled
bpf: add bpf_patch_insn_single helper
to the 4.4-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
bpf-add-bpf_patch_insn_single-helper.patch
and it can be found in the queue-4.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From c237ee5eb33bf19fe0591c04ff8db19da7323a83 Mon Sep 17 00:00:00 2001
From: Daniel Borkmann <daniel@iogearbox.net>
Date: Fri, 13 May 2016 19:08:30 +0200
Subject: bpf: add bpf_patch_insn_single helper
From: Daniel Borkmann <daniel@iogearbox.net>
commit c237ee5eb33bf19fe0591c04ff8db19da7323a83 upstream.
Move the functionality to patch instructions out of the verifier
code and into the core as the new bpf_patch_insn_single() helper
will be needed later on for blinding as well. No changes in
functionality.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4ff0e647598f..c4aae496f376 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -495,6 +495,9 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
void bpf_int_jit_compile(struct bpf_prog *fp);
bool bpf_helper_changes_skb_data(void *func);
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ const struct bpf_insn *patch, u32 len);
+
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 5313d09d4b62..49b5538a5301 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,6 +136,77 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
+static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ /* Call and Exit are both special jumps with no
+ * target inside the BPF instruction image.
+ */
+ BPF_OP(insn->code) != BPF_CALL &&
+ BPF_OP(insn->code) != BPF_EXIT;
+}
+
+static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+{
+ struct bpf_insn *insn = prog->insnsi;
+ u32 i, insn_cnt = prog->len;
+
+ for (i = 0; i < insn_cnt; i++, insn++) {
+ if (!bpf_is_jmp_and_has_target(insn))
+ continue;
+
+ /* Adjust offset of jmps if we cross boundaries. */
+ if (i < pos && i + insn->off + 1 > pos)
+ insn->off += delta;
+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+ insn->off -= delta;
+ }
+}
+
+struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+ const struct bpf_insn *patch, u32 len)
+{
+ u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+ struct bpf_prog *prog_adj;
+
+ /* Since our patchlet doesn't expand the image, we're done. */
+ if (insn_delta == 0) {
+ memcpy(prog->insnsi + off, patch, sizeof(*patch));
+ return prog;
+ }
+
+ insn_adj_cnt = prog->len + insn_delta;
+
+ /* Several new instructions need to be inserted. Make room
+ * for them. Likely, there's no need for a new allocation as
+ * last page could have large enough tailroom.
+ */
+ prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
+ GFP_USER);
+ if (!prog_adj)
+ return NULL;
+
+ prog_adj->len = insn_adj_cnt;
+
+ /* Patching happens in 3 steps:
+ *
+ * 1) Move over tail of insnsi from next instruction onwards,
+ * so we can patch the single target insn with one or more
+ * new ones (patching is always from 1 to n insns, n > 0).
+ * 2) Inject new instructions at the target location.
+ * 3) Adjust branch offsets if necessary.
+ */
+ insn_rest = insn_adj_cnt - off - len;
+
+ memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
+ sizeof(*patch) * insn_rest);
+ memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
+
+ bpf_adj_branches(prog_adj, off, insn_delta);
+
+ return prog_adj;
+}
+
#ifdef CONFIG_BPF_JIT
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 84bff68cf80e..a08d66215245 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2587,26 +2587,6 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
insn->src_reg = 0;
}
-static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
-{
- struct bpf_insn *insn = prog->insnsi;
- int insn_cnt = prog->len;
- int i;
-
- for (i = 0; i < insn_cnt; i++, insn++) {
- if (BPF_CLASS(insn->code) != BPF_JMP ||
- BPF_OP(insn->code) == BPF_CALL ||
- BPF_OP(insn->code) == BPF_EXIT)
- continue;
-
- /* adjust offset of jmps if necessary */
- if (i < pos && i + insn->off + 1 > pos)
- insn->off += delta;
- else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
- insn->off -= delta;
- }
-}
-
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
@@ -2616,14 +2596,15 @@ static int convert_ctx_accesses(struct verifier_env *env)
int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16];
struct bpf_prog *new_prog;
- u32 cnt;
- int i;
enum bpf_access_type type;
+ int i;
if (!env->prog->aux->ops->convert_ctx_access)
return 0;
for (i = 0; i < insn_cnt; i++, insn++) {
+ u32 insn_delta, cnt;
+
if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
@@ -2645,34 +2626,18 @@ static int convert_ctx_accesses(struct verifier_env *env)
return -EINVAL;
}
- if (cnt == 1) {
- memcpy(insn, insn_buf, sizeof(*insn));
- continue;
- }
-
- /* several new insns need to be inserted. Make room for them */
- insn_cnt += cnt - 1;
- new_prog = bpf_prog_realloc(env->prog,
- bpf_prog_size(insn_cnt),
- GFP_USER);
+ new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
- new_prog->len = insn_cnt;
-
- memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1,
- sizeof(*insn) * (insn_cnt - i - cnt));
-
- /* copy substitute insns in place of load instruction */
- memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
-
- /* adjust branches in the whole program */
- adjust_branches(new_prog, i, cnt - 1);
+ insn_delta = cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
- insn = new_prog->insnsi + i + cnt - 1;
- i += cnt - 1;
+ insn = new_prog->insnsi + i + insn_delta;
+
+ insn_cnt += insn_delta;
+ i += insn_delta;
}
return 0;
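As a quick aside for readers following the branch-adjustment math: the two
rules in bpf_adj_branches() change a jump offset only when the patch site
falls strictly between the jump and its target. A minimal userspace sketch
of the same crossing test (toy types and values are illustrative, this is
not kernel code):

#include <assert.h>
#include <stdio.h>

/* Toy stand-in for struct bpf_insn: only the relative offset matters. */
struct toy_insn {
        int is_jmp; /* jump with an in-image target? */
        int off;    /* relative target: 0 means the next insn */
};

/* The same crossing test bpf_adj_branches() applies after 'delta'
 * instructions have been inserted at position 'pos'. */
static void adjust(struct toy_insn *insn, int cnt, int pos, int delta)
{
        for (int i = 0; i < cnt; i++) {
                if (!insn[i].is_jmp)
                        continue;
                if (i < pos && i + insn[i].off + 1 > pos)
                        insn[i].off += delta;
                else if (i > pos + delta && i + insn[i].off + 1 <= pos + delta)
                        insn[i].off -= delta;
        }
}

int main(void)
{
        /* insn 0 jumps over insn 1 to insn 2 (off = 1). Replacing insn 1
         * with three insns (delta = 2) must widen the jump to off = 3. */
        struct toy_insn prog[] = { { 1, 1 }, { 0, 0 }, { 0, 0 } };

        adjust(prog, 3, 1, 2);
        assert(prog[0].off == 3);
        printf("jump offset after patching: %d\n", prog[0].off);
        return 0;
}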
Patches currently in stable-queue which might be from daniel@iogearbox.net are
queue-4.4/bpf-add-bpf_patch_insn_single-helper.patch
This is a note to let you know that I've just added the patch titled
x86/microcode/intel: Extend BDW late-loading with a revision check
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
x86-microcode-intel-extend-bdw-late-loading-with-a-revision-check.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From b94b7373317164402ff7728d10f7023127a02b60 Mon Sep 17 00:00:00 2001
From: Jia Zhang <qianyue.zj@alibaba-inc.com>
Date: Mon, 1 Jan 2018 10:04:47 +0800
Subject: x86/microcode/intel: Extend BDW late-loading with a revision check
From: Jia Zhang <qianyue.zj@alibaba-inc.com>
commit b94b7373317164402ff7728d10f7023127a02b60 upstream.
Instead of blacklisting all model 79 CPUs when attempting late
microcode loading, limit that to CPUs with microcode revisions <
0x0b000021, because only on those can late loading cause a system hang.
For such processors either:
a) a BIOS update which might contain a newer microcode revision
or
b) the early microcode loading method
should be considered.
Processors with revisions 0x0b000021 or higher will not experience such
hangs.
For more details, see erratum BDF90 in document #334165 (Intel Xeon
Processor E7-8800/4800 v4 Product Family Specification Update) from
September 2017.
[ bp: Heavily massage commit message and pr_* statements. ]
Fixes: 723f2828a98c ("x86/microcode/intel: Disable late loading on model 79")
Signed-off-by: Jia Zhang <qianyue.zj@alibaba-inc.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Tony Luck <tony.luck@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: http://lkml.kernel.org/r/1514772287-92959-1-git-send-email-qianyue.zj@aliba…
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kernel/cpu/microcode/intel.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
- if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
- pr_err_once("late loading on model 79 is disabled.\n");
+ /*
+ * Late loading on model 79 with microcode revision less than 0x0b000021
+ * may result in a system hang. This behavior is documented in item
+ * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
+ */
+ if (c->x86 == 6 &&
+ c->x86_model == INTEL_FAM6_BROADWELL_X &&
+ c->x86_mask == 0x01 &&
+ c->microcode < 0x0b000021) {
+ pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+ pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
return true;
}
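For what the new condition amounts to, the predicate can be restated as a
standalone, testable userspace function. A hedged sketch (function name and
parameters are illustrative; 79 is the numeric value of
INTEL_FAM6_BROADWELL_X):

#include <stdbool.h>
#include <stdio.h>

/* Late loading is blocked only for family 6, model 79, stepping 1 parts
 * whose current microcode revision predates the BDF90 fix. */
static bool bdw_late_load_blocked(unsigned int family, unsigned int model,
                                  unsigned int stepping, unsigned int rev)
{
        return family == 6 && model == 79 &&
               stepping == 0x01 && rev < 0x0b000021;
}

int main(void)
{
        /* The boundary: 0x0b000020 is still blocked, 0x0b000021 is not. */
        printf("%d\n", bdw_late_load_blocked(6, 79, 0x01, 0x0b000020)); /* 1 */
        printf("%d\n", bdw_late_load_blocked(6, 79, 0x01, 0x0b000021)); /* 0 */
        return 0;
}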
Patches currently in stable-queue which might be from qianyue.zj@alibaba-inc.com are
queue-4.14/x86-microcode-intel-extend-bdw-late-loading-with-a-revision-check.patch
This is a note to let you know that I've just added the patch titled
rbd: set max_segments to USHRT_MAX
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
rbd-set-max_segments-to-ushrt_max.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 21acdf45f4958135940f0b4767185cf911d4b010 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov <idryomov@gmail.com>
Date: Thu, 21 Dec 2017 15:35:11 +0100
Subject: rbd: set max_segments to USHRT_MAX
From: Ilya Dryomov <idryomov@gmail.com>
commit 21acdf45f4958135940f0b4767185cf911d4b010 upstream.
Commit d3834fefcfe5 ("rbd: bump queue_max_segments") bumped
max_segments (unsigned short) to max_hw_sectors (unsigned int).
max_hw_sectors is set to the number of 512-byte sectors in an object
and overflows unsigned short for 32M (largest possible) objects, making
the block layer resort to handing us single segment (i.e. single page
or even smaller) bios in that case.
Fixes: d3834fefcfe5 ("rbd: bump queue_max_segments")
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Alex Elder <elder@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/block/rbd.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4421,7 +4421,7 @@ static int rbd_init_disk(struct rbd_devi
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
q->limits.max_sectors = queue_max_hw_sectors(q);
- blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+ blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, segment_size);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
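The truncation the changelog describes is easy to reproduce in isolation;
a minimal userspace sketch using the numbers from the commit message (32M
object, 512-byte sectors):

#include <stdio.h>

int main(void)
{
        /* Largest possible rbd object is 32M; in 512-byte sectors that
         * is 65536 == 0x10000, one past what unsigned short can hold. */
        unsigned int   max_hw_sectors = (32u * 1024 * 1024) / 512;
        unsigned short max_segments   = max_hw_sectors; /* silently truncates */

        printf("max_hw_sectors = %u\n", max_hw_sectors); /* 65536 */
        printf("max_segments   = %u\n", max_segments);   /* 0 */
        return 0;
}

Capping the value at USHRT_MAX instead keeps the queue limit as large as
the type allows without wrapping.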
Patches currently in stable-queue which might be from idryomov@gmail.com are
queue-4.14/rbd-reacquire-lock-should-update-lock-owner-client-id.patch
queue-4.14/rbd-set-max_segments-to-ushrt_max.patch
This is a note to let you know that I've just added the patch titled
rbd: reacquire lock should update lock owner client id
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
rbd-reacquire-lock-should-update-lock-owner-client-id.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From edd8ca8015800b354453b891d38960f3a474b7e4 Mon Sep 17 00:00:00 2001
From: Florian Margaine <florian@platform.sh>
Date: Wed, 13 Dec 2017 16:43:59 +0100
Subject: rbd: reacquire lock should update lock owner client id
From: Florian Margaine <florian@platform.sh>
commit edd8ca8015800b354453b891d38960f3a474b7e4 upstream.
Otherwise, future operations on this RBD using exclusive-lock are
going to require the lock from a non-existent client id.
Fixes: 14bb211d324d ("rbd: support updating the lock cookie without releasing the lock")
Link: http://tracker.ceph.com/issues/19929
Signed-off-by: Florian Margaine <florian@platform.sh>
[idryomov@gmail.com: rbd_set_owner_cid() call, __rbd_lock() helper]
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/block/rbd.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3074,13 +3074,21 @@ static void format_lock_cookie(struct rb
mutex_unlock(&rbd_dev->watch_mutex);
}
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+ struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+ strcpy(rbd_dev->lock_cookie, cookie);
+ rbd_set_owner_cid(rbd_dev, &cid);
+ queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
/*
* lock_rwsem must be held for write
*/
static int rbd_lock(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_client_id cid = rbd_get_cid(rbd_dev);
char cookie[32];
int ret;
@@ -3095,9 +3103,7 @@ static int rbd_lock(struct rbd_device *r
return ret;
rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
- strcpy(rbd_dev->lock_cookie, cookie);
- rbd_set_owner_cid(rbd_dev, &cid);
- queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+ __rbd_lock(rbd_dev, cookie);
return 0;
}
@@ -3883,7 +3889,7 @@ static void rbd_reacquire_lock(struct rb
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->lock_dwork, 0);
} else {
- strcpy(rbd_dev->lock_cookie, cookie);
+ __rbd_lock(rbd_dev, cookie);
}
}
Patches currently in stable-queue which might be from florian@platform.sh are
queue-4.14/rbd-reacquire-lock-should-update-lock-owner-client-id.patch
This is a note to let you know that I've just added the patch titled
mmc: renesas_sdhi: Add MODULE_LICENSE
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
mmc-renesas_sdhi-add-module_license.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 967a6a07e95c58eb9c1581d22a1d9c2d1929843f Mon Sep 17 00:00:00 2001
From: Masaharu Hayakawa <masaharu.hayakawa.ry@renesas.com>
Date: Wed, 13 Dec 2017 11:33:00 +0900
Subject: mmc: renesas_sdhi: Add MODULE_LICENSE
From: Masaharu Hayakawa <masaharu.hayakawa.ry@renesas.com>
commit 967a6a07e95c58eb9c1581d22a1d9c2d1929843f upstream.
The following error occurs when loading renesas_sdhi_core.c module,
so add MODULE_LICENSE("GPL v2").
renesas_sdhi_core: module license 'unspecified' taints kernel.
Signed-off-by: Masaharu Hayakawa <masaharu.hayakawa.ry@renesas.com>
Fixes: 9d08428afb72 ("mmc: renesas-sdhi: make renesas_sdhi_sys_dmac main module file")
[Shimoda: Added Fixes tag and Cc to the stable ML]
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
Acked-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/mmc/host/renesas_sdhi_core.c | 3 +++
1 file changed, 3 insertions(+)
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_
return 0;
}
EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
+
+MODULE_LICENSE("GPL v2");
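For context, a minimal module skeleton showing where the macro belongs (a
sketch with illustrative names, not part of the patch); building a module
like this without the MODULE_LICENSE() line reproduces the taint message
quoted above:

#include <linux/init.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

/* Omitting this yields: module license 'unspecified' taints kernel. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Minimal skeleton showing MODULE_LICENSE placement");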
Patches currently in stable-queue which might be from masaharu.hayakawa.ry@renesas.com are
queue-4.14/mmc-renesas_sdhi-add-module_license.patch
This is a note to let you know that I've just added the patch titled
KVM: x86: Add memory barrier on vmcs field lookup
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kvm-x86-add-memory-barrier-on-vmcs-field-lookup.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 75f139aaf896d6fdeec2e468ddfa4b2fe469bf40 Mon Sep 17 00:00:00 2001
From: Andrew Honig <ahonig@google.com>
Date: Wed, 10 Jan 2018 10:12:03 -0800
Subject: KVM: x86: Add memory barrier on vmcs field lookup
From: Andrew Honig <ahonig@google.com>
commit 75f139aaf896d6fdeec2e468ddfa4b2fe469bf40 upstream.
This adds a memory barrier when performing a lookup into
the vmcs_field_to_offset_table. This is related to
CVE-2017-5753.
Signed-off-by: Andrew Honig <ahonig@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kvm/vmx.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -888,8 +888,16 @@ static inline short vmcs_field_to_offset
{
BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
- if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
- vmcs_field_to_offset_table[field] == 0)
+ if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
+ return -ENOENT;
+
+ /*
+ * FIXME: Mitigation for CVE-2017-5753. To be replaced with a
+ * generic mechanism.
+ */
+ asm("lfence");
+
+ if (vmcs_field_to_offset_table[field] == 0)
return -ENOENT;
return vmcs_field_to_offset_table[field];
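The hunk above is an instance of the general Spectre-v1 pattern: bounds
check, speculation barrier, then the dependent load. A hedged userspace
sketch of the same shape (x86-only because of the lfence; the table
contents are illustrative); later kernels provide array_index_nospec() as
the generic mechanism the FIXME refers to:

#include <stdio.h>

#define TABLE_SIZE 16
static const short table[TABLE_SIZE] = { 10, 20, 30 };

/* Bounds check, then lfence: the fence keeps the CPU from speculatively
 * indexing 'table' with an out-of-range value before the comparison
 * retires. */
static short lookup(unsigned long field)
{
        if (field >= TABLE_SIZE)
                return -1;
        asm volatile("lfence" ::: "memory");
        return table[field];
}

int main(void)
{
        printf("%d\n", lookup(2));   /* 30 */
        printf("%d\n", lookup(999)); /* -1, table never dereferenced */
        return 0;
}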
Patches currently in stable-queue which might be from ahonig@google.com are
queue-4.14/kvm-vmx-scrub-hardware-gprs-at-vm-exit.patch
queue-4.14/kvm-x86-add-memory-barrier-on-vmcs-field-lookup.patch
This is a note to let you know that I've just added the patch titled
KVM: PPC: Book3S PR: Fix WIMG handling under pHyp
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kvm-ppc-book3s-pr-fix-wimg-handling-under-phyp.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 6c7d47c33ed323f14f2a3b8de925e831dbaa4e69 Mon Sep 17 00:00:00 2001
From: Alexey Kardashevskiy <aik@ozlabs.ru>
Date: Wed, 22 Nov 2017 14:42:21 +1100
Subject: KVM: PPC: Book3S PR: Fix WIMG handling under pHyp
From: Alexey Kardashevskiy <aik@ozlabs.ru>
commit 6c7d47c33ed323f14f2a3b8de925e831dbaa4e69 upstream.
Commit 96df226 ("KVM: PPC: Book3S PR: Preserve storage control bits")
added code to preserve WIMG bits but it missed 2 special cases:
- a magic page in kvmppc_mmu_book3s_64_xlate() and
- guest real mode in kvmppc_handle_pagefault().
For these PTEs, WIMG was 0 and pHyp failed on them, causing a guest to
stop at the very beginning, at NIP=0x100 (due to bd9166ffe "KVM: PPC:
Book3S PR: Exit KVM on failed mapping").
According to LoPAPR v1.1 14.5.4.1.2 H_ENTER:
The hypervisor checks that the WIMG bits within the PTE are appropriate
for the physical page number else H_Parameter return. (For System Memory
pages WIMG=0010, or, 1110 if the SAO option is enabled, and for IO pages
WIMG=01**.)
This patch hence initializes WIMG to the non-zero value HPTE_R_M (0x10),
as expected by pHyp.
[paulus@ozlabs.org - fix compile for 32-bit]
Fixes: 96df226 "KVM: PPC: Book3S PR: Preserve storage control bits"
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Tested-by: Ruediger Oertel <ro@suse.de>
Reviewed-by: Greg Kurz <groug@kaod.org>
Tested-by: Greg Kurz <groug@kaod.org>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/powerpc/kvm/book3s_64_mmu.c | 1 +
arch/powerpc/kvm/book3s_pr.c | 2 ++
2 files changed, 3 insertions(+)
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(st
gpte->may_read = true;
gpte->may_write = true;
gpte->page_size = MMU_PAGE_4K;
+ gpte->wimg = HPTE_R_M;
return 0;
}
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
+#define HPTE_R_M _PAGE_COHERENT
#endif
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_r
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
pte.page_size = MMU_PAGE_64K;
+ pte.wimg = HPTE_R_M;
}
switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
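The H_ENTER rule quoted from LoPAPR can be written out as a small
predicate. A hedged sketch, where HPTE_R_M (0x10) comes from the commit
above and the remaining WIMG bit values are assumptions based on the
conventional nibble layout:

#include <stdbool.h>
#include <stdio.h>

#define HPTE_R_W 0x40u /* write-through (assumed value) */
#define HPTE_R_I 0x20u /* cache-inhibited (assumed value) */
#define HPTE_R_M 0x10u /* memory coherence, per the commit */
#define HPTE_R_G 0x08u /* guarded (assumed value) */

/* LoPAPR 14.5.4.1.2: system memory pages need WIMG=0010,
 * or 1110 when the SAO option is enabled. */
static bool wimg_ok_for_system_memory(unsigned int wimg, bool sao)
{
        unsigned int bits = wimg & (HPTE_R_W | HPTE_R_I | HPTE_R_M | HPTE_R_G);

        return bits == HPTE_R_M ||
               (sao && bits == (HPTE_R_W | HPTE_R_I | HPTE_R_M));
}

int main(void)
{
        printf("%d\n", wimg_ok_for_system_memory(0, false));        /* 0: H_Parameter */
        printf("%d\n", wimg_ok_for_system_memory(HPTE_R_M, false)); /* 1 */
        return 0;
}

This is why initializing pte.wimg to HPTE_R_M in the two paths above
satisfies pHyp where WIMG=0 did not.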
Patches currently in stable-queue which might be from aik@ozlabs.ru are
queue-4.14/kvm-ppc-book3s-pr-fix-wimg-handling-under-phyp.patch
This is a note to let you know that I've just added the patch titled
KVM: PPC: Book3S HV: Fix use after free in case of multiple resize requests
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kvm-ppc-book3s-hv-fix-use-after-free-in-case-of-multiple-resize-requests.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 4ed11aeefda439c76ddae3ceebcfa4fad111f149 Mon Sep 17 00:00:00 2001
From: Serhii Popovych <spopovyc@redhat.com>
Date: Mon, 4 Dec 2017 09:36:42 -0500
Subject: KVM: PPC: Book3S HV: Fix use after free in case of multiple resize requests
From: Serhii Popovych <spopovyc@redhat.com>
commit 4ed11aeefda439c76ddae3ceebcfa4fad111f149 upstream.
When serving multiple resize requests, the following could happen:
    CPU0                                CPU1
    ----                                ----
    kvm_vm_ioctl_resize_hpt_prepare(1);
      -> schedule_work()
                                        /* system_rq might be busy: delay */
                                        kvm_vm_ioctl_resize_hpt_prepare(2);
                                          mutex_lock();
                                          if (resize) {
                                             ...
                                             release_hpt_resize();
                                          }
                                          ...
    resize_hpt_prepare_work()             -> schedule_work()
    {                                     mutex_unlock()
       /* resize->kvm could be wrong */
       struct kvm *kvm = resize->kvm;
       mutex_lock(&kvm->lock);   <<<< UAF
       ...
    }
i.e. a second resize request with a different order could be started by
kvm_vm_ioctl_resize_hpt_prepare(), causing the previous request to be
free()d while there is still an active worker thread that will try to
access it. This leads to a use-after-free at the point marked UAF in
the diagram above.
To prevent this from happening, instead of unconditionally releasing a
pre-existing resize structure from the prepare ioctl(), we check if
the existing structure has an in-progress worker. We do that by
checking whether resize->error == -EBUSY, which is safe because the
resize->error field is protected by kvm->lock. If there is an
active worker, instead of releasing, we mark the structure as stale by
unlinking it from kvm_struct.
In the worker thread we check for a stale structure (with kvm->lock
held), and in that case abort, releasing the stale structure ourselves.
We make the check both before and after the actual allocation. Strictly,
only the check afterwards is needed, the check before is an
optimization: if the structure happens to become stale before the
worker thread is dispatched, rather than during the allocation, it
means we can avoid allocating then immediately freeing a potentially
substantial amount of memory.
This fixes the following (or similar) host kernel crash message:
[ 635.277361] Unable to handle kernel paging request for data at address 0x00000000
[ 635.277438] Faulting instruction address: 0xc00000000052f568
[ 635.277446] Oops: Kernel access of bad area, sig: 11 [#1]
[ 635.277451] SMP NR_CPUS=2048 NUMA PowerNV
[ 635.277470] Modules linked in: xt_CHECKSUM iptable_mangle ipt_MASQUERADE
nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4
nf_defrag_ipv4 xt_conntrack nf_conntrack ipt_REJECT nf_reject_ipv4 tun bridge stp llc
ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter nfsv3 nfs_acl nfs
lockd grace fscache kvm_hv kvm rpcrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi
scsi_transport_iscsi ib_srpt target_core_mod ext4 ib_srp scsi_transport_srp
ib_ipoib mbcache jbd2 rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ocrdma(T)
ib_core ses enclosure scsi_transport_sas sg shpchp leds_powernv ibmpowernv i2c_opal
i2c_core powernv_rng ipmi_powernv ipmi_devintf ipmi_msghandler ip_tables xfs
libcrc32c sr_mod sd_mod cdrom lpfc nvme_fc(T) nvme_fabrics nvme_core ipr nvmet_fc(T)
tg3 nvmet libata be2net crc_t10dif crct10dif_generic scsi_transport_fc ptp scsi_tgt
pps_core crct10dif_common dm_mirror dm_region_hash dm_log dm_mod
[ 635.278687] CPU: 40 PID: 749 Comm: kworker/40:1 Tainted: G ------------ T 3.10.0.bz1510771+ #1
[ 635.278782] Workqueue: events resize_hpt_prepare_work [kvm_hv]
[ 635.278851] task: c0000007e6840000 ti: c0000007e9180000 task.ti: c0000007e9180000
[ 635.278919] NIP: c00000000052f568 LR: c0000000009ea310 CTR: c0000000009ea4f0
[ 635.278988] REGS: c0000007e91837f0 TRAP: 0300 Tainted: G ------------ T (3.10.0.bz1510771+)
[ 635.279077] MSR: 9000000100009033 <SF,HV,EE,ME,IR,DR,RI,LE> CR: 24002022 XER: 00000000
[ 635.279248] CFAR: c000000000009368 DAR: 0000000000000000 DSISR: 40000000 SOFTE: 1
GPR00: c0000000009ea310 c0000007e9183a70 c000000001250b00 c0000007e9183b10
GPR04: 0000000000000000 0000000000000000 c0000007e9183650 0000000000000000
GPR08: c0000007ffff7b80 00000000ffffffff 0000000080000028 d00000000d2529a0
GPR12: 0000000000002200 c000000007b56800 c000000000120028 c0000007f135bb40
GPR16: 0000000000000000 c000000005c1e018 c000000005c1e018 0000000000000000
GPR20: 0000000000000001 c0000000011bf778 0000000000000001 fffffffffffffef7
GPR24: 0000000000000000 c000000f1e262e50 0000000000000002 c0000007e9180000
GPR28: c000000f1e262e4c c000000f1e262e50 0000000000000000 c0000007e9183b10
[ 635.280149] NIP [c00000000052f568] __list_add+0x38/0x110
[ 635.280197] LR [c0000000009ea310] __mutex_lock_slowpath+0xe0/0x2c0
[ 635.280253] Call Trace:
[ 635.280277] [c0000007e9183af0] [c0000000009ea310] __mutex_lock_slowpath+0xe0/0x2c0
[ 635.280356] [c0000007e9183b70] [c0000000009ea554] mutex_lock+0x64/0x70
[ 635.280426] [c0000007e9183ba0] [d00000000d24da04] resize_hpt_prepare_work+0xe4/0x1c0 [kvm_hv]
[ 635.280507] [c0000007e9183c40] [c000000000113c0c] process_one_work+0x1dc/0x680
[ 635.280587] [c0000007e9183ce0] [c000000000114250] worker_thread+0x1a0/0x520
[ 635.280655] [c0000007e9183d80] [c00000000012010c] kthread+0xec/0x100
[ 635.280724] [c0000007e9183e30] [c00000000000a4b8] ret_from_kernel_thread+0x5c/0xa4
[ 635.280814] Instruction dump:
[ 635.280880] 7c0802a6 fba1ffe8 fbc1fff0 7cbd2b78 fbe1fff8 7c9e2378 7c7f1b78 f8010010
[ 635.281099] f821ff81 e8a50008 7fa52040 40de00b8 <e8be0000> 7fbd2840 40de008c 7fbff040
[ 635.281324] ---[ end trace b628b73449719b9d ]---
Fixes: b5baa6877315 ("KVM: PPC: Book3S HV: KVM-HV HPT resizing implementation")
Signed-off-by: Serhii Popovych <spopovyc@redhat.com>
[dwg: Replaced BUG_ON()s with WARN_ON()s and reworded commit message
for clarity]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/powerpc/kvm/book3s_64_mmu_hv.c | 54 ++++++++++++++++++++++++------------
1 file changed, 37 insertions(+), 17 deletions(-)
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1430,16 +1430,20 @@ static void resize_hpt_pivot(struct kvm_
static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
- BUG_ON(kvm->arch.resize_hpt != resize);
+ if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+ return;
if (!resize)
return;
- if (resize->hpt.virt)
- kvmppc_free_hpt(&resize->hpt);
+ if (resize->error != -EBUSY) {
+ if (resize->hpt.virt)
+ kvmppc_free_hpt(&resize->hpt);
+ kfree(resize);
+ }
- kvm->arch.resize_hpt = NULL;
- kfree(resize);
+ if (kvm->arch.resize_hpt == resize)
+ kvm->arch.resize_hpt = NULL;
}
static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1448,26 +1452,42 @@ static void resize_hpt_prepare_work(stru
struct kvm_resize_hpt,
work);
struct kvm *kvm = resize->kvm;
- int err;
+ int err = 0;
if (WARN_ON(resize->error != -EBUSY))
return;
- resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
- resize->order);
-
- err = resize_hpt_allocate(resize);
-
- /* We have strict assumption about -EBUSY
- * when preparing for HPT resize.
- */
- if (WARN_ON(err == -EBUSY))
- err = -EINPROGRESS;
-
mutex_lock(&kvm->lock);
+ /* Request is still current? */
+ if (kvm->arch.resize_hpt == resize) {
+ /* We may request large allocations here:
+ * do not sleep with kvm->lock held for a while.
+ */
+ mutex_unlock(&kvm->lock);
+
+ resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+ resize->order);
+
+ err = resize_hpt_allocate(resize);
+
+ /* We have strict assumption about -EBUSY
+ * when preparing for HPT resize.
+ */
+ if (WARN_ON(err == -EBUSY))
+ err = -EINPROGRESS;
+
+ mutex_lock(&kvm->lock);
+ /* It is possible that kvm->arch.resize_hpt != resize
+ * after we grab kvm->lock again.
+ */
+ }
+
resize->error = err;
+ if (kvm->arch.resize_hpt != resize)
+ resize_hpt_release(kvm, resize);
+
mutex_unlock(&kvm->lock);
}
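The ownership rule the patch establishes can be exercised outside the
kernel; a hedged pthread analogue (illustrative names, -EBUSY replaced by
a sentinel) in which whichever side sees the request go stale last is the
one that frees it:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define IN_FLIGHT (-16) /* stands in for -EBUSY */

struct request {
        int error; /* IN_FLIGHT: worker running; <0 failed; 0 done */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct request *current_req; /* protected by 'lock' */

/* Analogue of resize_hpt_release(); call with 'lock' held. Free only
 * when no worker is in flight, otherwise just unlink: the worker will
 * notice the unlink and free the stale structure itself. */
static void release(struct request *req)
{
        if (!req)
                return;
        if (req->error != IN_FLIGHT)
                free(req);
        if (current_req == req)
                current_req = NULL;
}

static void *worker(void *arg)
{
        struct request *req = arg;

        pthread_mutex_lock(&lock);
        req->error = 0;          /* pretend the allocation succeeded */
        if (current_req != req)
                release(req);    /* went stale: we free it ourselves */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        struct request *r1 = calloc(1, sizeof(*r1));
        pthread_t t;

        r1->error = IN_FLIGHT;
        current_req = r1;
        pthread_create(&t, NULL, worker, r1);

        /* A second request supersedes r1: unlink rather than free,
         * which is exactly what avoids the use-after-free above. */
        pthread_mutex_lock(&lock);
        release(current_req);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("stale request freed by exactly one owner");
        return 0;
}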
Patches currently in stable-queue which might be from spopovyc@redhat.com are
queue-4.14/kvm-ppc-book3s-hv-drop-prepare_done-from-struct-kvm_resize_hpt.patch
queue-4.14/kvm-ppc-book3s-hv-fix-use-after-free-in-case-of-multiple-resize-requests.patch
This is a note to let you know that I've just added the patch titled
KVM: PPC: Book3S HV: Drop prepare_done from struct kvm_resize_hpt
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kvm-ppc-book3s-hv-drop-prepare_done-from-struct-kvm_resize_hpt.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From 3073774e638ef18d222465fe92bfc8fccb90d288 Mon Sep 17 00:00:00 2001
From: Serhii Popovych <spopovyc@redhat.com>
Date: Mon, 4 Dec 2017 09:36:41 -0500
Subject: KVM: PPC: Book3S HV: Drop prepare_done from struct kvm_resize_hpt
From: Serhii Popovych <spopovyc@redhat.com>
commit 3073774e638ef18d222465fe92bfc8fccb90d288 upstream.
Currently the kvm_resize_hpt structure has two fields relevant to the
state of an ongoing resize: 'prepare_done', which indicates whether
the worker thread has completed or not, and 'error' which indicates
whether it was successful or not.
Since the success/failure isn't known until completion, this is
confusingly redundant. This patch consolidates the information into
just the 'error' value: -EBUSY indicates the work is still in
progress, other negative values indicate (completed) failure, 0
indicates successful completion.
As a bonus this reduces the size of struct kvm_resize_hpt by
__alignof__(struct kvm_hpt_info) and saves a few bytes of code.
While there, correct the comment in struct kvm_resize_hpt that
references a non-existent semaphore (a leftover from an early draft).
Assert with WARN_ON() if the HPT allocation work runs more than once
for a resize request, or if resize_hpt_allocate() returns -EBUSY, which
is treated specially.
Change comparison against zero to make checkpatch.pl happy.
Signed-off-by: Serhii Popovych <spopovyc@redhat.com>
[dwg: Changed BUG_ON()s to WARN_ON()s and altered commit message for
clarity]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/powerpc/kvm/book3s_64_mmu_hv.c | 44 ++++++++++++++++++++++--------------
1 file changed, 27 insertions(+), 17 deletions(-)
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
u32 order;
/* These fields protected by kvm->lock */
+
+ /* Possible values and their usage:
+ * <0 an error occurred during allocation,
+ * -EBUSY allocation is in progress,
+ * 0 allocation made successfully.
+ */
int error;
- bool prepare_done;
- /* Private to the work thread, until prepare_done is true,
- * then protected by kvm->resize_hpt_sem */
+ /* Private to the work thread, until error != -EBUSY,
+ * then protected by kvm->lock.
+ */
struct kvm_hpt_info hpt;
};
@@ -1444,15 +1450,23 @@ static void resize_hpt_prepare_work(stru
struct kvm *kvm = resize->kvm;
int err;
+ if (WARN_ON(resize->error != -EBUSY))
+ return;
+
resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
resize->order);
err = resize_hpt_allocate(resize);
+ /* We have strict assumption about -EBUSY
+ * when preparing for HPT resize.
+ */
+ if (WARN_ON(err == -EBUSY))
+ err = -EINPROGRESS;
+
mutex_lock(&kvm->lock);
resize->error = err;
- resize->prepare_done = true;
mutex_unlock(&kvm->lock);
}
@@ -1477,14 +1491,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(str
if (resize) {
if (resize->order == shift) {
- /* Suitable resize in progress */
- if (resize->prepare_done) {
- ret = resize->error;
- if (ret != 0)
- resize_hpt_release(kvm, resize);
- } else {
+ /* Suitable resize in progress? */
+ ret = resize->error;
+ if (ret == -EBUSY)
ret = 100; /* estimated time in ms */
- }
+ else if (ret)
+ resize_hpt_release(kvm, resize);
goto out;
}
@@ -1504,6 +1516,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(str
ret = -ENOMEM;
goto out;
}
+
+ resize->error = -EBUSY;
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1558,16 +1572,12 @@ long kvm_vm_ioctl_resize_hpt_commit(stru
if (!resize || (resize->order != shift))
goto out;
- ret = -EBUSY;
- if (!resize->prepare_done)
- goto out;
-
ret = resize->error;
- if (ret != 0)
+ if (ret)
goto out;
ret = resize_hpt_rehash(resize);
- if (ret != 0)
+ if (ret)
goto out;
resize_hpt_pivot(resize);
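The consolidated encoding is compact enough to restate as a helper; a
hedged sketch (an illustrative function, not from the patch) that doubles
as documentation of the three states:

#include <errno.h>
#include <stdio.h>

/* 'error' after this patch: -EBUSY == worker still running,
 * any other negative value == (completed) failure, 0 == success. */
static const char *resize_state(int error)
{
        if (error == -EBUSY)
                return "in progress";
        return error ? "failed" : "completed";
}

int main(void)
{
        printf("%s\n", resize_state(-EBUSY));  /* in progress */
        printf("%s\n", resize_state(-ENOMEM)); /* failed */
        printf("%s\n", resize_state(0));       /* completed */
        return 0;
}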
Patches currently in stable-queue which might be from spopovyc@redhat.com are
queue-4.14/kvm-ppc-book3s-hv-drop-prepare_done-from-struct-kvm_resize_hpt.patch
queue-4.14/kvm-ppc-book3s-hv-fix-use-after-free-in-case-of-multiple-resize-requests.patch
This is a note to let you know that I've just added the patch titled
KVM: PPC: Book3S HV: Always flush TLB in kvmppc_alloc_reset_hpt()
to the 4.14-stable tree which can be found at:
http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
The filename of the patch is:
kvm-ppc-book3s-hv-always-flush-tlb-in-kvmppc_alloc_reset_hpt.patch
and it can be found in the queue-4.14 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@vger.kernel.org> know about it.
From ecba8297aafd50db6ae867e90844eead1611ef1c Mon Sep 17 00:00:00 2001
From: David Gibson <david@gibson.dropbear.id.au>
Date: Wed, 10 Jan 2018 17:04:39 +1100
Subject: KVM: PPC: Book3S HV: Always flush TLB in kvmppc_alloc_reset_hpt()
From: David Gibson <david@gibson.dropbear.id.au>
commit ecba8297aafd50db6ae867e90844eead1611ef1c upstream.
The KVM_PPC_ALLOCATE_HTAB ioctl(), implemented by kvmppc_alloc_reset_hpt(),
is supposed to completely clear and reset a guest's Hashed Page Table (HPT),
allocating or re-allocating it if necessary.
In the case where an HPT of the right size already exists and it just
zeroes it, it forces a TLB flush on all guest CPUs, to remove any stale TLB
entries loaded from the old HPT.
However, that situation can arise when the HPT is resizing as well - or
even when switching from an RPT to HPT - so those cases need a TLB flush as
well.
So, move the TLB flush to trigger in all cases except for errors.
Fixes: f98a8bf9ee20 ("KVM: PPC: Book3S HV: Allow KVM_PPC_ALLOCATE_HTAB ioctl() to change HPT size")
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/powerpc/kvm/book3s_64_mmu_hv.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -165,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *
* Reset all the reverse-mapping chains for all memslots
*/
kvmppc_rmap_reset(kvm);
- /* Ensure that each vcpu will flush its TLB on next entry. */
- cpumask_setall(&kvm->arch.need_tlb_flush);
err = 0;
goto out;
}
@@ -182,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *
kvmppc_set_hpt(kvm, &info);
out:
+ if (err == 0)
+ /* Ensure that each vcpu will flush its TLB on next entry. */
+ cpumask_setall(&kvm->arch.need_tlb_flush);
+
mutex_unlock(&kvm->lock);
return err;
}
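The shape of this fix, moving a side effect from one early-exit path to
the shared exit so every successful case gets it, in a hedged standalone
sketch (names and return values are illustrative):

#include <stdio.h>

static int flushes;

/* Mirror of the control flow above: the TLB-flush stand-in now runs
 * at the common 'out' label for every non-error outcome. */
static int alloc_reset(int reuse_existing, int alloc_fails)
{
        int err;

        if (reuse_existing) {
                /* zero the existing table ... */
                err = 0;
                goto out;
        }
        err = alloc_fails ? -1 : 0; /* (re)allocate the table */
out:
        if (err == 0)
                flushes++; /* each vcpu must flush its TLB on next entry */
        return err;
}

int main(void)
{
        alloc_reset(1, 0); /* reuse path   -> flush */
        alloc_reset(0, 0); /* realloc path -> flush */
        alloc_reset(0, 1); /* error path   -> no flush */
        printf("flushes: %d (expected 2)\n", flushes);
        return 0;
}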
Patches currently in stable-queue which might be from david@gibson.dropbear.id.au are
queue-4.14/kvm-ppc-book3s-hv-always-flush-tlb-in-kvmppc_alloc_reset_hpt.patch
queue-4.14/kvm-ppc-book3s-hv-drop-prepare_done-from-struct-kvm_resize_hpt.patch
queue-4.14/kvm-ppc-book3s-hv-fix-use-after-free-in-case-of-multiple-resize-requests.patch