The patch below does not apply to the 4.9-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From daeb725e919c0d2d4b628aeaa1fa053125f888b2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris(a)chris-wilson.co.uk>
Date: Thu, 5 Apr 2018 12:49:15 +0100
Subject: [PATCH] drm/i915/psr: Chase psr.enabled only under the psr.lock
Inside the psr work function, we want to wait for PSR to idle first and
wish to do so without blocking the normal modeset path, so we do so
without holding the PSR lock. However, we first have to find which pipe
PSR was enabled on, which requires chasing into the PSR struct and
requires locking to prevent intel_psr_disable() from concurrently
setting our pointer to NULL.
Fixes: 995d30477496 ("drm/i915: VLV/CHV PSR Software timer mode")
Signed-off-by: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: Durgadoss R <durgadoss.r(a)intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v4.0+
Reviewed-by: Jose Roberto de Souza <jose.souza(a)intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405114915.29609-1-chris@…
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2d53f7398a6d..69a5b276f4d8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -775,53 +775,59 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&dev_priv->psr.work);
}
-static void intel_psr_work(struct work_struct *work)
+static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_dp *intel_dp;
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ intel_dp = dev_priv->psr.enabled;
+ if (!intel_dp)
+ return false;
- /* We have to make sure PSR is ready for re-enable
- * otherwise it keeps disabled until next full enable/disable cycle.
- * PSR might take some time to get fully disabled
- * and be ready for re-enable.
- */
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_enabled) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
} else {
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ struct drm_crtc *crtc =
+ dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ reg = VLV_PSRSTAT(pipe);
+ mask = VLV_EDP_PSR_IN_TRANS;
}
+
+ mutex_unlock(&dev_priv->psr.lock);
+
+ err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ if (err)
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
+ return err == 0 && dev_priv->psr.enabled;
+}
- if (!intel_dp)
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ /*
+ * We have to make sure PSR is ready for re-enable
+ * otherwise it keeps disabled until next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (!psr_wait_for_idle(dev_priv))
goto unlock;
/*
@@ -832,7 +838,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_activate(intel_dp);
+ intel_psr_activate(dev_priv->psr.enabled);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
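For readers following the locking change above, here is a minimal sketch of the
pattern the patch adopts, reduced to its shape: chase the protected pointer only
under the lock, drop the lock for the slow hardware wait, then re-acquire the
lock and re-check the pointer before acting on it. All names below (my_dev,
my_state, poll_hw_until_idle) are hypothetical stand-ins for dev_priv,
psr.enabled and the register wait; this is not the driver's actual code.

#include <linux/mutex.h>

struct my_state;                        /* hypothetical, stands in for intel_dp */

struct my_dev {                         /* hypothetical, stands in for dev_priv */
        struct mutex lock;              /* stands in for psr.lock */
        struct my_state *enabled;       /* stands in for psr.enabled */
};

static void poll_hw_until_idle(struct my_state *state)
{
        /* slow register poll elided; runs without dev->lock held */
}

/* Called with dev->lock held; returns with dev->lock held. */
static bool my_wait_for_idle(struct my_dev *dev)
{
        struct my_state *state;

        /* Chase the pointer only while holding the lock ... */
        state = dev->enabled;
        if (!state)
                return false;

        /* ... drop it so the slow wait does not block the modeset path ... */
        mutex_unlock(&dev->lock);
        poll_hw_until_idle(state);
        mutex_lock(&dev->lock);

        /* ... and verify the state is still wanted after the unlocked wait. */
        return dev->enabled != NULL;
}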
The patch below does not apply to the 4.14-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From daeb725e919c0d2d4b628aeaa1fa053125f888b2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris(a)chris-wilson.co.uk>
Date: Thu, 5 Apr 2018 12:49:15 +0100
Subject: [PATCH] drm/i915/psr: Chase psr.enabled only under the psr.lock
Inside the psr work function, we want to wait for PSR to idle first and
wish to do so without blocking the normal modeset path, so we do so
without holding the PSR lock. However, we first have to find which pipe
PSR was enabled on, which requires chasing into the PSR struct and
requires locking to prevent intel_psr_disable() from concurrently
setting our pointer to NULL.
Fixes: 995d30477496 ("drm/i915: VLV/CHV PSR Software timer mode")
Signed-off-by: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: Durgadoss R <durgadoss.r(a)intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v4.0+
Reviewed-by: Jose Roberto de Souza <jose.souza(a)intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405114915.29609-1-chris@…
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2d53f7398a6d..69a5b276f4d8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -775,53 +775,59 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&dev_priv->psr.work);
}
-static void intel_psr_work(struct work_struct *work)
+static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_dp *intel_dp;
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ intel_dp = dev_priv->psr.enabled;
+ if (!intel_dp)
+ return false;
- /* We have to make sure PSR is ready for re-enable
- * otherwise it keeps disabled until next full enable/disable cycle.
- * PSR might take some time to get fully disabled
- * and be ready for re-enable.
- */
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_enabled) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
} else {
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ struct drm_crtc *crtc =
+ dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ reg = VLV_PSRSTAT(pipe);
+ mask = VLV_EDP_PSR_IN_TRANS;
}
+
+ mutex_unlock(&dev_priv->psr.lock);
+
+ err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ if (err)
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
+ return err == 0 && dev_priv->psr.enabled;
+}
- if (!intel_dp)
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ /*
+ * We have to make sure PSR is ready for re-enable
+ * otherwise it keeps disabled until next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (!psr_wait_for_idle(dev_priv))
goto unlock;
/*
@@ -832,7 +838,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_activate(intel_dp);
+ intel_psr_activate(dev_priv->psr.enabled);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From daeb725e919c0d2d4b628aeaa1fa053125f888b2 Mon Sep 17 00:00:00 2001
From: Chris Wilson <chris(a)chris-wilson.co.uk>
Date: Thu, 5 Apr 2018 12:49:15 +0100
Subject: [PATCH] drm/i915/psr: Chase psr.enabled only under the psr.lock
Inside the psr work function, we want to wait for PSR to idle first and
wish to do so without blocking the normal modeset path, so we do so
without holding the PSR lock. However, we first have to find which pipe
PSR was enabled on, which requires chasing into the PSR struct and
requires locking to prevent intel_psr_disable() from concurrently
setting our pointer to NULL.
Fixes: 995d30477496 ("drm/i915: VLV/CHV PSR Software timer mode")
Signed-off-by: Chris Wilson <chris(a)chris-wilson.co.uk>
Cc: Durgadoss R <durgadoss.r(a)intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Cc: <stable(a)vger.kernel.org> # v4.0+
Reviewed-by: Jose Roberto de Souza <jose.souza(a)intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi(a)intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180405114915.29609-1-chris@…
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2d53f7398a6d..69a5b276f4d8 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -775,53 +775,59 @@ void intel_psr_disable(struct intel_dp *intel_dp,
cancel_delayed_work_sync(&dev_priv->psr.work);
}
-static void intel_psr_work(struct work_struct *work)
+static bool psr_wait_for_idle(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct intel_dp *intel_dp;
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ intel_dp = dev_priv->psr.enabled;
+ if (!intel_dp)
+ return false;
- /* We have to make sure PSR is ready for re-enable
- * otherwise it keeps disabled until next full enable/disable cycle.
- * PSR might take some time to get fully disabled
- * and be ready for re-enable.
- */
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_enabled) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
}
} else {
- if (intel_wait_for_register(dev_priv,
- VLV_PSRSTAT(pipe),
- VLV_EDP_PSR_IN_TRANS,
- 0,
- 1)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
+ struct drm_crtc *crtc =
+ dp_to_dig_port(intel_dp)->base.base.crtc;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ reg = VLV_PSRSTAT(pipe);
+ mask = VLV_EDP_PSR_IN_TRANS;
}
+
+ mutex_unlock(&dev_priv->psr.lock);
+
+ err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ if (err)
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
+ return err == 0 && dev_priv->psr.enabled;
+}
- if (!intel_dp)
+static void intel_psr_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ /*
+ * We have to make sure PSR is ready for re-enable
+ * otherwise it keeps disabled until next full enable/disable cycle.
+ * PSR might take some time to get fully disabled
+ * and be ready for re-enable.
+ */
+ if (!psr_wait_for_idle(dev_priv))
goto unlock;
/*
@@ -832,7 +838,7 @@ static void intel_psr_work(struct work_struct *work)
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
- intel_psr_activate(intel_dp);
+ intel_psr_activate(dev_priv->psr.enabled);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
The patch below does not apply to the 4.17-stable tree.
If someone wants it applied there, or to any other stable or longterm
tree, then please email the backport, including the original git commit
id to <stable(a)vger.kernel.org>.
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From 642ad57058baaa2c105925a75c153bb486877513 Mon Sep 17 00:00:00 2001
From: Harry Wentland <harry.wentland(a)amd.com>
Date: Thu, 12 Apr 2018 10:51:51 -0400
Subject: [PATCH] Revert "drm/amd/display: fix dereferencing possible
ERR_PTR()"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This reverts commit cd2d6c92a8e39d7e50a5af9fcc67d07e6a89e91d.
Cc: Shirish S <shirish.s(a)amd.com>
Cc: Alex Deucher <alexander.deucher(a)amd.com>
Cc: stable(a)vger.kernel.org
Reviewed-by: Michel Dänzer <michel.daenzer(a)amd.com>
Signed-off-by: Harry Wentland <harry.wentland(a)amd.com>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 265f0166f688..0c29f3b97398 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4941,9 +4941,6 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
return -EDEADLK;
crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
- if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
-
if (crtc->primary == plane && crtc_state->active) {
if (!plane_state->fb)
return -EINVAL;
ioremap() calls pud_free_pmd_page() / pmd_free_pte_page() when it creates
a pud / pmd map. The following preconditions are met at their entry.
- All pte entries for a target pud/pmd address range have been cleared.
- System-wide TLB purges have been performed for a target pud/pmd address
range.
The preconditions ensure that there is no stale TLB entry for the range.
Speculation cannot cache TLB entries for the range, since that would require
all levels of page entries, including ptes, to have the P and A bits set for
the associated address. However, speculation may cache pud/pmd entries
(paging-structure caches) when they have the P bit set.
Add a system-wide TLB purge (INVLPG) of a single page after clearing the
pud/pmd entry's P-bit.
SDM 4.10.4.1, Operations that Invalidate TLBs and Paging-Structure Caches,
states that:
INVLPG invalidates all paging-structure caches associated with the
current PCID regardless of the linear addresses to which they correspond.
Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces")
Signed-off-by: Toshi Kani <toshi.kani(a)hpe.com>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Cc: Michal Hocko <mhocko(a)suse.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Ingo Molnar <mingo(a)redhat.com>
Cc: "H. Peter Anvin" <hpa(a)zytor.com>
Cc: Joerg Roedel <joro(a)8bytes.org>
Cc: <stable(a)vger.kernel.org>
---
arch/x86/mm/pgtable.c | 36 ++++++++++++++++++++++++++++++------
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index fbd14e506758..e3deefb891da 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -725,24 +725,44 @@ int pmd_clear_huge(pmd_t *pmd)
* @pud: Pointer to a PUD.
* @addr: Virtual address associated with pud.
*
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
*/
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd;
+ pmd_t *pmd, *pmd_sv;
+ pte_t *pte;
int i;
if (pud_none(*pud))
return 1;
pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+ if (!pmd_sv)
+ return 0;
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
- return 0;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_sv[i] = pmd[i];
+ if (!pmd_none(pmd[i]))
+ pmd_clear(&pmd[i]);
+ }
pud_clear(pud);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd_sv[i])) {
+ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+ free_page((unsigned long)pte);
+ }
+ }
+
+ free_page((unsigned long)pmd_sv);
free_page((unsigned long)pmd);
return 1;
@@ -753,7 +773,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
* @pmd: Pointer to a PMD.
* @addr: Virtual address associated with pmd.
*
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
@@ -765,6 +785,10 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
free_page((unsigned long)pte);
return 1;
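Since the hunks above only show fragments, here is the smaller of the two fixed
paths, pmd_free_pte_page(), reassembled from the diff as a reading aid
(reconstructed, not a verbatim copy of the file): the entry is cleared first,
the single-page flush then purges the paging-structure caches, and only
afterwards is the page-table page freed.

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        if (pmd_none(*pmd))
                return 1;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        /* INVLPG of a single page also purges the paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE - 1);

        /* Only free the pte page once no cached translation can reach it. */
        free_page((unsigned long)pte);

        return 1;
}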
From: Jens Axboe <axboe(a)kernel.dk>
commit cd4a4ae4683dc2e09380118e205e057896dcda2b upstream
If we end up splitting a bio and the queue goes away between
the initial submission and the later split submission, then we
can block forever in blk_queue_enter() waiting for the reference
to drop to zero. This will never happen, since we already hold
a reference.
Mark a split bio as already having entered the queue, so we can
just use the live non-blocking queue enter variant.
Thanks to Tetsuo Handa for the analysis.
We're running fio tests and the tasks get stuck in a D state forever
when systemd-udevd tries to read the partition table. This patch solves
it. Please apply to 4.17 stable.
Reported-by: syzbot+c4f9cebf9d651f6e54de(a)syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe(a)kernel.dk>
Signed-off-by: Alexandru Moise <00moses.alexander00(a)gmail.com>
---
v2: Fixed "From:"
v3: Added "From: Jens Axboe" above commit sha
block/blk-core.c | 4 +++-
block/blk-merge.c | 10 ++++++++++
include/linux/blk_types.h | 2 ++
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b559b9d4f1a2..47ab2d9d02d9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2392,7 +2392,9 @@ blk_qc_t generic_make_request(struct bio *bio)
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
+ if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+ blk_queue_enter_live(q);
+ else if (blk_queue_enter(q, flags) < 0) {
if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 782940c65d8a..481dc02668f9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
/* there isn't chance to merge the splitted bio */
split->bi_opf |= REQ_NOMERGE;
+ /*
+ * Since we're recursing into make_request here, ensure
+ * that we mark this bio as already having entered the queue.
+ * If not, and the queue is going away, we can get stuck
+ * forever on waiting for the queue reference to drop. But
+ * that will never happen, as we're already holding a
+ * reference to it.
+ */
+ bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+
bio_chain(split, *bio);
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
generic_make_request(*bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 17b18b91ebac..1602bf4ab4cd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -186,6 +186,8 @@ struct bio {
* throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
+#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+
/* See BVEC_POOL_OFFSET below before adding new flags */
/*
--
2.18.0
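Condensed view of how the two hunks above cooperate once applied (fragments
reassembled from the diff for orientation, not compilable on their own;
handle_failed_enter is a hypothetical stand-in for the unchanged error path,
not a real kernel function):

/* blk_queue_split(): mark the parent bio before re-submitting it, since
 * the caller already holds a reference to the queue. */
bio_set_flag(*bio, BIO_QUEUE_ENTERED);
bio_chain(split, *bio);
generic_make_request(*bio);

/* generic_make_request(): a marked bio takes the live, non-blocking
 * reference instead of sleeping in blk_queue_enter(). */
if (bio_flagged(bio, BIO_QUEUE_ENTERED))
        blk_queue_enter_live(q);
else if (blk_queue_enter(q, flags) < 0)
        handle_failed_enter(q, bio);    /* hypothetical stand-in, error path unchanged */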
commit cd4a4ae4683dc2e09380118e205e057896dcda2b upstream.
If we end up splitting a bio and the queue goes away between
the initial submission and the later split submission, then we
can block forever in blk_queue_enter() waiting for the reference
to drop to zero. This will never happen, since we already hold
a reference.
Mark a split bio as already having entered the queue, so we can
just use the live non-blocking queue enter variant.
Thanks to Tetsuo Handa for the analysis.
We're running fio tests and the tasks get stuck in a D state forever
when systemd-udevd tries to read the partition table. This patch solves
it. Please apply to 4.17 stable.
Reported-by: syzbot+c4f9cebf9d651f6e54de(a)syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe(a)kernel.dk>
Signed-off-by: Alexandru Moise <00moses.alexander00(a)gmail.com>
---
v2: Changed "From:" so the email is sent as being from Jens instead of me,
sorry again Jens.
block/blk-core.c | 4 +++-
block/blk-merge.c | 10 ++++++++++
include/linux/blk_types.h | 2 ++
3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b559b9d4f1a2..47ab2d9d02d9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2392,7 +2392,9 @@ blk_qc_t generic_make_request(struct bio *bio)
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
+ if (bio_flagged(bio, BIO_QUEUE_ENTERED))
+ blk_queue_enter_live(q);
+ else if (blk_queue_enter(q, flags) < 0) {
if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 782940c65d8a..481dc02668f9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -210,6 +210,16 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
/* there isn't chance to merge the splitted bio */
split->bi_opf |= REQ_NOMERGE;
+ /*
+ * Since we're recursing into make_request here, ensure
+ * that we mark this bio as already having entered the queue.
+ * If not, and the queue is going away, we can get stuck
+ * forever on waiting for the queue reference to drop. But
+ * that will never happen, as we're already holding a
+ * reference to it.
+ */
+ bio_set_flag(*bio, BIO_QUEUE_ENTERED);
+
bio_chain(split, *bio);
trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
generic_make_request(*bio);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 17b18b91ebac..1602bf4ab4cd 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -186,6 +186,8 @@ struct bio {
* throttling rules. Don't do it again. */
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
+#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+
/* See BVEC_POOL_OFFSET below before adding new flags */
/*
--
2.18.0
ubifs_log_start_commit() allocates a buffer with kmalloc(). This buffer is
used to build UBIFS CS and REF nodes; all structure attributes get set,
except for the padding field in the ubifs_ref_node.
That way we leak 28 bytes of kernel memory to the MTD.
Fix it by using kzalloc().
Cc: stable(a)vger.kernel.org
Fixes: 1e51764a3c2a ("UBIFS: add new flash file system")
Signed-off-by: Richard Weinberger <richard(a)nod.at>
---
fs/ubifs/log.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index 7cffa120a750..60d49c6dd470 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -369,7 +369,7 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
max_len = ALIGN(max_len, c->min_io_size);
- buf = cs = kmalloc(max_len, GFP_NOFS);
+ buf = cs = kzalloc(max_len, GFP_NOFS);
if (!buf)
return -ENOMEM;
--
2.18.0
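The underlying bug class is easy to demonstrate outside the kernel: assigning
every named member still leaves compiler-inserted padding untouched, so writing
the raw struct bytes to storage leaks whatever the allocator left behind,
unless the buffer is zeroed up front (kzalloc in the kernel, calloc below). A
standalone userspace sketch, purely illustrative and not UBIFS code:

#include <stdio.h>
#include <stdlib.h>

/* A node with an implicit 3-byte pad between 'type' and 'len' on most ABIs,
 * similar in spirit to the padding field in struct ubifs_ref_node. */
struct node {
        unsigned char type;
        /* 3 bytes of compiler-inserted padding on typical ABIs */
        unsigned int  len;
};

static void fill(struct node *n)
{
        n->type = 1;    /* every named member is set ... */
        n->len = 64;    /* ... but the padding bytes are never written */
}

int main(void)
{
        /* malloc(), like kmalloc(): padding keeps whatever the heap held before
         * (a fresh process may happen to see zeros; the kernel heap gives no
         * such guarantee). */
        struct node *leaky = malloc(sizeof(*leaky));
        /* calloc(), like kzalloc(): the whole object, padding included, is zero */
        struct node *clean = calloc(1, sizeof(*clean));

        if (!leaky || !clean)
                return 1;

        fill(leaky);
        fill(clean);

        /* Dumping the raw bytes (as UBIFS writes nodes to the MTD) would expose
         * the uninitialized padding of 'leaky' but not of 'clean'. */
        for (size_t i = 0; i < sizeof(*leaky); i++)
                printf("%02x ", ((unsigned char *)leaky)[i]);
        printf("\n");
        for (size_t i = 0; i < sizeof(*clean); i++)
                printf("%02x ", ((unsigned char *)clean)[i]);
        printf("\n");

        free(leaky);
        free(clean);
        return 0;
}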
From: Michel Dänzer <michel.daenzer(a)amd.com>
Without this, there may not be enough slots, which could trigger the
BUG_ON in reservation_object_add_shared_fence().
v2:
* Jump to the error label instead of returning directly (Jerry Zhang)
v3:
* Reserve slots for command submission after VM updates (Christian König)
Cc: stable(a)vger.kernel.org
Bugzilla: https://bugs.freedesktop.org/106418
Reported-by: mikhail.v.gavrilov(a)gmail.com
Signed-off-by: Michel Dänzer <michel.daenzer(a)amd.com>
Signed-off-by: Junwei Zhang <Jerry.Zhang(a)amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7a625f3..1bc0281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -928,6 +928,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
r = amdgpu_bo_vm_update_pte(p);
if (r)
return r;
+
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+ if (r)
+ return r;
}
return amdgpu_cs_sync_rings(p);
--
1.9.1
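For context, the pairing the patch restores, sketched with the reservation API
as it existed in this kernel generation (single-argument
reservation_object_reserve_shared(); later kernels changed the signature).
This is an illustrative helper, not code from the driver; the caller is
assumed to hold the reservation object's lock for both calls.

#include <linux/reservation.h>
#include <linux/dma-fence.h>

/* Illustrative only: every reservation_object_add_shared_fence() must be
 * preceded by a slot reservation, otherwise the preallocated shared-fence
 * array can be full and the BUG_ON in add_shared_fence() fires. */
static int add_shared_fence_checked(struct reservation_object *resv,
                                    struct dma_fence *fence)
{
        int r;

        r = reservation_object_reserve_shared(resv);   /* may allocate, can fail */
        if (r)
                return r;       /* typically -ENOMEM */

        reservation_object_add_shared_fence(resv, fence);      /* consumes the slot */
        return 0;
}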
From: Cannon Matthews <cannonmatthews(a)google.com>
Subject: mm: hugetlb: yield when prepping struct pages
When booting with very large numbers of gigantic (i.e. 1G) pages, the
operations in the loop of gather_bootmem_prealloc, and specifically
prep_compound_gigantic_page, take a very long time, and can cause a
softlockup if enough pages are requested at boot.
For example booting with 3844 1G pages requires prepping
(set_compound_head, init the count) over 1 billion 4K tail pages, which
takes considerable time.
Add a cond_resched() to the outer loop in gather_bootmem_prealloc() to
prevent this lockup.
Tested: Booted with softlockup_panic=1 hugepagesz=1G hugepages=3844 and no
softlockup is reported, and the hugepages are reported as successfully
setup.
Link: http://lkml.kernel.org/r/20180627214447.260804-1-cannonmatthews@google.com
Signed-off-by: Cannon Matthews <cannonmatthews(a)google.com>
Reviewed-by: Andrew Morton <akpm(a)linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Acked-by: Michal Hocko <mhocko(a)suse.com>
Cc: Andres Lagar-Cavilla <andreslc(a)google.com>
Cc: Peter Feiner <pfeiner(a)google.com>
Cc: Greg Thelen <gthelen(a)google.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/hugetlb.c | 1 +
1 file changed, 1 insertion(+)
diff -puN mm/hugetlb.c~mm-hugetlb-yield-when-prepping-struct-pages mm/hugetlb.c
--- a/mm/hugetlb.c~mm-hugetlb-yield-when-prepping-struct-pages
+++ a/mm/hugetlb.c
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_preall
*/
if (hstate_is_gigantic(h))
adjust_managed_page_count(page, 1 << h->order);
+ cond_resched();
}
}
_
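The general rule the fix applies, as a minimal sketch: any boot-time or
otherwise preemption-starved loop over a very large amount of work should
yield once per iteration so the soft-lockup watchdog is not triggered. The
loop below is hypothetical (struct item and expensive_prep() are stand-ins),
not the hugetlb code itself.

#include <linux/init.h>
#include <linux/sched.h>        /* cond_resched() */

struct item;                    /* hypothetical work item */

static void expensive_prep(struct item *it)
{
        /* placeholder: real work would e.g. init thousands of tail pages */
}

static void __init prep_many_items(struct item *items, unsigned long count)
{
        unsigned long i;

        for (i = 0; i < count; i++) {
                expensive_prep(&items[i]);
                cond_resched();         /* yield each iteration; keeps the watchdog quiet */
        }
}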
From: Janosch Frank <frankja(a)linux.ibm.com>
Subject: userfaultfd: hugetlbfs: fix userfaultfd_huge_must_wait() pte access
Use huge_ptep_get() to translate huge ptes to normal ptes so we can check
them with the huge_pte_* functions. Otherwise some architectures will
check the wrong values and will not wait for userspace to bring in the
memory.
Link: http://lkml.kernel.org/r/20180626132421.78084-1-frankja@linux.ibm.com
Fixes: 369cd2121be4 ("userfaultfd: hugetlbfs: userfaultfd_huge_must_wait for hugepmd ranges")
Signed-off-by: Janosch Frank <frankja(a)linux.ibm.com>
Reviewed-by: David Hildenbrand <david(a)redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Cc: Andrea Arcangeli <aarcange(a)redhat.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
fs/userfaultfd.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff -puN fs/userfaultfd.c~userfaultfd-hugetlbfs-fix-userfaultfd_huge_must_wait-pte-access fs/userfaultfd.c
--- a/fs/userfaultfd.c~userfaultfd-hugetlbfs-fix-userfaultfd_huge_must_wait-pte-access
+++ a/fs/userfaultfd.c
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must
unsigned long reason)
{
struct mm_struct *mm = ctx->mm;
- pte_t *pte;
+ pte_t *ptep, pte;
bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
- pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
- if (!pte)
+ ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+ if (!ptep)
goto out;
ret = false;
+ pte = huge_ptep_get(ptep);
/*
* Lockless access: we're in a wait_event so it's ok if it
* changes under us.
*/
- if (huge_pte_none(*pte))
+ if (huge_pte_none(pte))
ret = true;
- if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+ if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
ret = true;
out:
return ret;
_
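The access rule the patch enforces, reduced to a sketch (paraphrased from the
diff; the locking, the out label and the surrounding function are omitted, and
must_wait() is a hypothetical wrapper, not the real function): take an
architecture-correct snapshot of the entry with huge_ptep_get() and feed that
value, never a raw *ptep, to the huge_pte_* helpers.

#include <linux/mm.h>
#include <linux/hugetlb.h>

static bool must_wait(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long address, unsigned long reason)
{
        pte_t *ptep, pte;

        ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
        if (!ptep)
                return true;            /* no page table yet: wait for userspace */

        /* On some architectures (s390 among them) the stored entry is not a
         * plain pte, so *ptep must not be handed to the huge_pte_* helpers. */
        pte = huge_ptep_get(ptep);

        if (huge_pte_none(pte))
                return true;            /* not present: wait for userspace */
        if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                return true;            /* write-protected: wait */

        return false;
}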
The patch titled
Subject: fs/proc/task_mmu.c: fix Locked field in /proc/pid/smaps*
has been added to the -mm tree. Its filename is
mm-fix-locked-field-in-proc-pid-smaps.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-fix-locked-field-in-proc-pid-sm…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-fix-locked-field-in-proc-pid-sm…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Vlastimil Babka <vbabka(a)suse.cz>
Subject: fs/proc/task_mmu.c: fix Locked field in /proc/pid/smaps*
Thomas reports:
: While looking around in /proc on my v4.14.52 system I noticed that
: all processes got a lot of "Locked" memory in /proc/*/smaps. A lot
: more memory than a regular user can usually lock with mlock().
:
: commit 493b0e9d945fa9dfe96be93ae41b4ca4b6fdb317 (v4.14-rc1) seems
: to have changed the behavior of "Locked".
:
: Before that commit the code was like this. Notice the VM_LOCKED
: check.
:
: (vma->vm_flags & VM_LOCKED) ?
: (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
:
: After that commit Locked is now the same as Pss. This looks like a
: mistake.
:
: (unsigned long)(mss->pss >> (10 + PSS_SHIFT)));
Indeed, the commit has added mss->pss_locked with the correct value that
depends on VM_LOCKED, but forgot to actually use it. Fix it.
Link: http://lkml.kernel.org/r/ebf6c7fb-fec3-6a26-544f-710ed193c154@suse.cz
Fixes: 493b0e9d945f ("mm: add /proc/pid/smaps_rollup")
Signed-off-by: Vlastimil Babka <vbabka(a)suse.cz>
Reported-by: Thomas Lindroth <thomas.lindroth(a)gmail.com>
Cc: Alexey Dobriyan <adobriyan(a)gmail.com>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
fs/proc/task_mmu.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff -puN fs/proc/task_mmu.c~mm-fix-locked-field-in-proc-pid-smaps fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~mm-fix-locked-field-in-proc-pid-smaps
+++ a/fs/proc/task_mmu.c
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m,
SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
SEQ_PUT_DEC(" kB\nSwapPss: ",
mss->swap_pss >> PSS_SHIFT);
- SEQ_PUT_DEC(" kB\nLocked: ", mss->pss >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nLocked: ",
+ mss->pss_locked >> PSS_SHIFT);
seq_puts(m, " kB\n");
}
if (!rollup_mode) {
_
Patches currently in -mm which might be from vbabka(a)suse.cz are
mm-page_alloc-actually-ignore-mempolicies-for-high-priority-allocations.patch
mm-fix-locked-field-in-proc-pid-smaps.patch