Currently, ima_appraise_measurement() ignores the EVM status when
evm_verifyxattr() returns INTEGRITY_UNKNOWN. If a file has a valid
security.ima xattr with type IMA_XATTR_DIGEST or IMA_XATTR_DIGEST_NG,
ima_appraise_measurement() returns INTEGRITY_PASS regardless of the EVM
status. The problem is that the EVM status is overwritten with the
appraisal status.
This patch mitigates the issue by selecting signature verification as the
only method allowed for appraisal when EVM is not initialized. Since the
new behavior might break user space, it must be turned on by adding the
'-evm' suffix to the value of the ima_appraise= kernel option.
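To illustrate, a system that wants the stricter behavior would boot with a
command line entry such as (hypothetical example):

	ima_appraise=enforce-evm

With that option, while the EVM status is INTEGRITY_UNKNOWN a file whose
security.ima xattr is a plain digest (IMA_XATTR_DIGEST or
IMA_XATTR_DIGEST_NG) no longer passes appraisal; only a signature
(EVM_IMA_XATTR_DIGSIG) can.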
Fixes: 2fe5d6def1672 ("ima: integrity appraisal extension")
Signed-off-by: Roberto Sassu <roberto.sassu(a)huawei.com>
Cc: stable(a)vger.kernel.org
---
Documentation/admin-guide/kernel-parameters.txt | 3 ++-
security/integrity/ima/ima_appraise.c | 8 ++++++++
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 138f6664b2e2..d84a2e612b93 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1585,7 +1585,8 @@
Set number of hash buckets for inode cache.
ima_appraise= [IMA] appraise integrity measurements
- Format: { "off" | "enforce" | "fix" | "log" }
+ Format: { "off" | "enforce" | "fix" | "log" |
+ "enforce-evm" | "log-evm" }
default: "enforce"
ima_appraise_tcb [IMA] Deprecated. Use ima_policy= instead.
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 5fb7127bbe68..afef06e10fb9 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -18,6 +18,7 @@
#include "ima.h"
+static bool ima_appraise_req_evm __ro_after_init;
static int __init default_appraise_setup(char *str)
{
#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
@@ -28,6 +29,9 @@ static int __init default_appraise_setup(char *str)
else if (strncmp(str, "fix", 3) == 0)
ima_appraise = IMA_APPRAISE_FIX;
#endif
+ if (strcmp(str, "enforce-evm") == 0 ||
+ strcmp(str, "log-evm") == 0)
+ ima_appraise_req_evm = true;
return 1;
}
@@ -245,7 +249,11 @@ int ima_appraise_measurement(enum ima_hooks func,
switch (status) {
case INTEGRITY_PASS:
case INTEGRITY_PASS_IMMUTABLE:
+ break;
case INTEGRITY_UNKNOWN:
+ if (ima_appraise_req_evm &&
+ xattr_value->type != EVM_IMA_XATTR_DIGSIG)
+ goto out;
break;
case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
case INTEGRITY_NOLABEL: /* No security.evm xattr. */
--
2.17.1
The patch titled
Subject: mm: mmu_gather: remove __tlb_reset_range() for force flush
has been added to the -mm tree. Its filename is
mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-mmu_gather-remove-__tlb_reset_r…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-mmu_gather-remove-__tlb_reset_r…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Yang Shi <yang.shi(a)linux.alibaba.com>
Subject: mm: mmu_gather: remove __tlb_reset_range() for force flush
A few new fields were added to mmu_gather to make TLB flushing smarter for
huge pages by telling which level of the page table has changed.
__tlb_reset_range() is used to reset all this page table state to
"unchanged"; it is called by the TLB flush code when parallel mapping changes
happen on the same range under a non-exclusive lock (i.e. read mmap_sem).
Before commit dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in
munmap"), the syscalls that may update PTEs in parallel (e.g. MADV_DONTNEED,
MADV_FREE) did not remove page tables. But the aforementioned commit may do
munmap() under read mmap_sem and free page tables. This may result in a
program hang on aarch64, as reported by Jan Stancek. The problem can be
reproduced with his test program, slightly modified, below.
---8<---
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/*
 * Size of the mmap hint area. The exact value is an assumption made for
 * completeness; any reasonably large region works.
 */
#define DISTANT_MMAP_SIZE (256UL * 1024 * 1024)

static int map_size = 4096;
static int num_iter = 500;
static void *distant_area;

void *map_write_unmap(void *ptr)
{
	unsigned char *map_address;
	int i, j;

	for (i = 0; i < num_iter; i++) {
		map_address = mmap(distant_area, (size_t)map_size,
				   PROT_WRITE | PROT_READ,
				   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
		if (map_address == MAP_FAILED) {
			perror("mmap");
			exit(1);
		}
		for (j = 0; j < map_size; j++)
			map_address[j] = 'b';
		if (munmap(map_address, map_size) == -1) {
			perror("munmap");
			exit(1);
		}
	}
	return NULL;
}

void *dummy(void *ptr)
{
	return NULL;
}

int main(void)
{
	pthread_t thid[2];

	/* hint for mmap in map_write_unmap() */
	distant_area = mmap(0, DISTANT_MMAP_SIZE, PROT_WRITE | PROT_READ,
			    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	munmap(distant_area, (size_t)DISTANT_MMAP_SIZE);
	distant_area += DISTANT_MMAP_SIZE / 2;

	while (1) {
		pthread_create(&thid[0], NULL, map_write_unmap, NULL);
		pthread_create(&thid[1], NULL, dummy, NULL);
		pthread_join(thid[0], NULL);
		pthread_join(thid[1], NULL);
	}
}
---8<---
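The reproducer must be linked against pthreads; assuming it is saved as
repro.c, a typical build is:

	gcc -pthread -o repro repro.c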
The program may produce parallel execution like the following:
        t1                                      t2
  munmap(map_address)
    downgrade_write(&mm->mmap_sem);
    unmap_region()
      tlb_gather_mmu()
        inc_tlb_flush_pending(tlb->mm);
      free_pgtables()
        tlb->freed_tables = 1
        tlb->cleared_pmds = 1

                                        pthread_exit()
                                        madvise(thread_stack, 8M, MADV_DONTNEED)
                                          zap_page_range()
                                            tlb_gather_mmu()
                                              inc_tlb_flush_pending(tlb->mm);

    tlb_finish_mmu()
      if (mm_tlb_flush_nested(tlb->mm))
        __tlb_reset_range()
__tlb_reset_range() would reset the freed_tables and cleared_* bits, but this
causes an inconsistency for munmap(), which does free page tables. On
architectures that use those bits to flush only the affected page-table
levels, e.g. aarch64, the TLB may then not be flushed completely and stale
TLB entries may remain.
Use a fullmm flush instead, since it yields much better performance on
aarch64, and non-fullmm doesn't yield a significant difference on x86.
The originally proposed fix came from Jan Stancek, who did most of the
debugging of this issue; I just wrapped everything up together.
Link: http://lkml.kernel.org/r/1558322252-113575-1-git-send-email-yang.shi@linux.…
Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
Signed-off-by: Yang Shi <yang.shi(a)linux.alibaba.com>
Signed-off-by: Jan Stancek <jstancek(a)redhat.com>
Reported-by: Jan Stancek <jstancek(a)redhat.com>
Tested-by: Jan Stancek <jstancek(a)redhat.com>
Suggested-by: Will Deacon <will.deacon(a)arm.com>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Nick Piggin <npiggin(a)gmail.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar(a)linux.ibm.com>
Cc: Nadav Amit <namit(a)vmware.com>
Cc: Minchan Kim <minchan(a)kernel.org>
Cc: Mel Gorman <mgorman(a)suse.de>
Cc: <stable(a)vger.kernel.org> [4.20+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
mm/mmu_gather.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
--- a/mm/mmu_gather.c~mm-mmu_gather-remove-__tlb_reset_range-for-force-flush
+++ a/mm/mmu_gather.c
@@ -245,14 +245,28 @@ void tlb_finish_mmu(struct mmu_gather *t
{
/*
* If there are parallel threads are doing PTE changes on same range
- * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
- * flush by batching, a thread has stable TLB entry can fail to flush
- * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
- * forcefully if we detect parallel PTE batching threads.
+ * under non-exclusive lock (e.g., mmap_sem read-side) but defer TLB
+ * flush by batching, one thread may end up seeing inconsistent PTEs
+ * and result in having stale TLB entries. So flush TLB forcefully
+ * if we detect parallel PTE batching threads.
+ *
+ * However, some syscalls, e.g. munmap(), may free page tables, this
+ * needs force flush everything in the given range. Otherwise this
+ * may result in having stale TLB entries for some architectures,
+ * e.g. aarch64, that could specify flush what level TLB.
*/
if (mm_tlb_flush_nested(tlb->mm)) {
+ /*
+ * The aarch64 yields better performance with fullmm by
+ * avoiding multiple CPUs spamming TLBI messages at the
+ * same time.
+ *
+ * On x86 non-fullmm doesn't yield significant difference
+ * against fullmm.
+ */
+ tlb->fullmm = 1;
__tlb_reset_range(tlb);
- __tlb_adjust_range(tlb, start, end - start);
+ tlb->freed_tables = 1;
}
tlb_flush_mmu(tlb);
_
Patches currently in -mm which might be from yang.shi(a)linux.alibaba.com are
mm-mmu_gather-remove-__tlb_reset_range-for-force-flush.patch
Hi Sasha,
On Wed, May 29, 2019 at 07:52:27PM -0400, Sasha Levin wrote:
> brcmfmac: fix Oops when bringing up interface during USB disconnect
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
I see you've taken the brcmfmac fixes to stable. If I recall correctly, the
following commit was also quite relevant, and I don't think it was picked up.
It's upstream:
commit 5cdb0ef6144f47440850553579aa923c20a63f23
Author: Piotr Figiel <p.figiel(a)camlintechnologies.com>
Date: Mon Mar 4 15:42:52 2019 +0000
brcmfmac: fix NULL pointer derefence during USB disconnect
Maybe you could consider also taking this.
Best regards,
--
Piotr Figiel
Changes since v7 [1]:
- Make subsection helpers pfn based rather than physical-address based
(Oscar and Pavel)
- Make the subsection bitmap definition scalable for different section and
sub-section sizes across architectures. As a result:
    unsigned long map_active
...is converted to:
    DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION)
...and the helpers are renamed with a 'subsection' prefix. (Pavel)
(A struct sketch follows this change list.)
- New in this version is a touch of arch/powerpc/include/asm/sparsemem.h
in "[PATCH v8 01/12] mm/sparsemem: Introduce struct mem_section_usage"
to define ARCH_SUBSECTION_SHIFT.
- Drop "mm/sparsemem: Introduce common definitions for the size and mask
of a section" in favor of Robin's "mm/memremap: Rename and consolidate
SECTION_SIZE" (Pavel)
- Collect some more Reviewed-by tags. Patches that still lack review
tags: 1, 3, 9 - 12
[1]: https://lore.kernel.org/lkml/155677652226.2336373.8700273400832001094.stgit…
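For orientation, the conversion called out in the first item above lands in a
structure roughly like this (a sketch reconstructed from the description; the
exact field layout belongs to patch 1 of the series):

	struct mem_section_usage {
		DECLARE_BITMAP(subsection_map, SUBSECTIONS_PER_SECTION);
		/* pageblock flags previously stored behind mem_section */
		unsigned long pageblock_flags[0];
	};

Each set bit in subsection_map marks one active sub-section (2MB on x86)
within a section (128MB on x86).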
---
[merge logistics]
Hi Andrew,
These are too late for v5.2, I'm posting this v8 during the merge window
to maintain the review momentum.
---
[cover letter]
The memory hotplug section is an arbitrary / convenient unit for memory
hotplug. 'Section-size' units have bled into the user interface
('memblock' sysfs) and cannot be changed without breaking existing
userspace. The section-size constraint, while mostly benign for typical
memory hotplug, has wreaked, and continues to wreak, havoc with
'device-memory' use cases, persistent memory (pmem) in particular. Recall
that pmem uses
devm_memremap_pages(), and subsequently arch_add_memory(), to allocate a
'struct page' memmap for pmem. However, it does not use the 'bottom
half' of memory hotplug, i.e. never marks pmem pages online and never
exposes the userspace memblock interface for pmem. This leaves an
opening to redress the section-size constraint.
To date, the libnvdimm subsystem has attempted to inject padding to
satisfy the internal constraints of arch_add_memory(). Beyond
complicating the code, leading to bugs [2], wasting memory, and limiting
configuration flexibility, the padding hack is broken when the platform
changes the physical memory alignment of pmem from one boot to the
next. Device failure (intermittent or permanent) and physical
reconfiguration are events that can cause the platform firmware to
change the physical placement of pmem on a subsequent boot, and device
failure is an everyday event in a data-center.
It turns out that sections are only a hard requirement of the
user-facing interface for memory hotplug and with a bit more
infrastructure sub-section arch_add_memory() support can be added for
kernel internal usages like devm_memremap_pages(). Here is an analysis
of the current design assumptions in the current code and how they are
addressed in the new implementation:
Current design assumptions:
- Sections that describe boot memory (early sections) are never
unplugged / removed.
- pfn_valid(), in the CONFIG_SPARSEMEM_VMEMMAP=y case, devolves to a
valid_section() check
- __add_pages() and helper routines assume all operations occur in
PAGES_PER_SECTION units.
- The memblock sysfs interface only comprehends full sections
New design assumptions:
- Sections are instrumented with a sub-section bitmask to track (on x86)
individual 2MB sub-divisions of a 128MB section.
- Partially populated early sections can be extended with additional
sub-sections, and those sub-sections can be removed with
arch_remove_memory(). With this in place we no longer lose usable memory
capacity to padding.
- pfn_valid() is updated to look deeper than valid_section() to also check the
active-sub-section mask (a sketch follows this list). This indication is in the
same cacheline as the valid_section() data, so the performance impact is
expected to be negligible. So far the lkp robot has not reported any
regressions.
- Outside of the core vmemmap population routines which are replaced,
other helper routines like shrink_{zone,pgdat}_span() are updated to
handle the smaller granularity. Core memory hotplug routines that deal
with online memory are not touched.
- The existing memblock sysfs user api guarantees / assumptions are
not touched since this capability is limited to !online
!memblock-sysfs-accessible sections.
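The deeper pfn_valid() check mentioned in the list above reduces to something
like the following (a simplified sketch under the series' naming; the real
implementation lives in the sparsemem patches):

	static inline int pfn_valid(unsigned long pfn)
	{
		struct mem_section *ms;

		if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
			return 0;
		ms = __nr_to_section(pfn_to_section_nr(pfn));
		if (!valid_section(ms))
			return 0;
		/* new: also consult the per-section subsection_map */
		return pfn_section_valid(ms, pfn);
	}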
Meanwhile the issue reports continue to roll in from users that do not
understand when and how the 128MB constraint will bite them. The current
implementation relies on being able to support at least one misaligned
namespace, but that immediately falls over on any moderately complex
namespace creation attempt. Beyond the initial problem of 'System RAM'
colliding with pmem, and the unsolvable problem of physical alignment
changes, Linux is now being exposed to platforms that collide pmem
ranges with other pmem ranges by default [3]. In short,
devm_memremap_pages() has pushed the venerable section-size constraint
past the breaking point, and the simplicity of section-aligned
arch_add_memory() is no longer tenable.
These patches are exposed to the kbuild robot on my libnvdimm-pending
branch [4], and a preview of the unit test for this functionality is
available on the 'subsection-pending' branch of ndctl [5].
[2]: https://lore.kernel.org/r/155000671719.348031.2347363160141119237.stgit@dwi…
[3]: https://github.com/pmem/ndctl/issues/76
[4]: https://git.kernel.org/pub/scm/linux/kernel/git/djbw/nvdimm.git/log/?h=libn…
[5]: https://github.com/pmem/ndctl/commit/7c59b4867e1c
---
Dan Williams (11):
mm/sparsemem: Introduce struct mem_section_usage
mm/sparsemem: Add helpers track active portions of a section at boot
mm/hotplug: Prepare shrink_{zone,pgdat}_span for sub-section removal
mm/sparsemem: Convert kmalloc_section_memmap() to populate_section_memmap()
mm/hotplug: Kill is_dev_zone() usage in __remove_pages()
mm: Kill is_dev_zone() helper
mm/sparsemem: Prepare for sub-section ranges
mm/sparsemem: Support sub-section hotplug
mm/devm_memremap_pages: Enable sub-section remap
libnvdimm/pfn: Fix fsdax-mode namespace info-block zero-fields
libnvdimm/pfn: Stop padding pmem namespaces to section alignment
Robin Murphy (1):
mm/memremap: Rename and consolidate SECTION_SIZE
arch/powerpc/include/asm/sparsemem.h | 3
arch/x86/mm/init_64.c | 4
drivers/nvdimm/dax_devs.c | 2
drivers/nvdimm/pfn.h | 15 -
drivers/nvdimm/pfn_devs.c | 95 +++------
include/linux/memory_hotplug.h | 7 -
include/linux/mm.h | 4
include/linux/mmzone.h | 93 +++++++--
kernel/memremap.c | 63 ++----
mm/hmm.c | 2
mm/memory_hotplug.c | 172 +++++++++-------
mm/page_alloc.c | 8 -
mm/sparse-vmemmap.c | 21 +-
mm/sparse.c | 369 +++++++++++++++++++++++-----------
14 files changed, 511 insertions(+), 347 deletions(-)
Processing SDIO interrupts while dw_mmc is suspended (or partly
suspended) seems like a bad idea. We really don't want to be
processing them until we've gotten ourselves fully powered up.
You might be wondering how it's even possible to become suspended when
an SDIO interrupt is active. As can be seen in
dw_mci_enable_sdio_irq(), we explicitly keep dw_mmc out of runtime
suspend when the SDIO interrupt is enabled. ...but even though we
stop normal runtime suspend transitions when SDIO interrupts are
enabled, the dw_mci_runtime_suspend() can still get called for a full
system suspend.
Let's handle all this by explicitly masking SDIO interrupts in the
suspend call and unmasking them later in the resume call. To do this
cleanly I'll keep track of whether the client requested that SDIO
interrupts be enabled so that we can reliably restore them regardless
of whether we're masking them for one reason or another.
It should be noted that if dw_mci_enable_sdio_irq() is never called
(for instance, if we don't have an SDIO card plugged in), then
"client_sdio_enb" will always be false. In those cases this patch
adds a tiny bit of overhead to suspend/resume (a spinlock and a
read/write of INTMASK) but other than that is a no-op. The
SDMMC_INT_SDIO bit should always be clear and clearing it again won't
hurt.
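To make the AND rule concrete: suppose the client enabled SDIO IRQs, so
client_sdio_enb is true. A later suspend calls
__dw_mci_enable_sdio_irq(slot, false, false): client_requested is false, so
client_sdio_enb is left true but the INTMASK bit is cleared. Resume then
calls __dw_mci_enable_sdio_irq(slot, true, false): the requested true is
ANDed with client_sdio_enb (true), so the bit is set again. If the client had
never enabled SDIO IRQs, the same resume call ANDs true with false and the
bit stays clear.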
Without this fix it can be seen that rk3288-veyron Chromebooks with
Marvell WiFi would sometimes fail to resume WiFi even after picking my
recent mwifiex patch [1]. Specifically you'd see messages like this:
mwifiex_sdio mmc1:0001:1: Firmware wakeup failed
mwifiex_sdio mmc1:0001:1: PREP_CMD: FW in reset state
...and tracing through the resume code in the failing cases showed
that we were processing an SDIO interrupt really early in the resume
call.
NOTE: downstream in Chrome OS 3.14 and 3.18 kernels (both of which
support the Marvell SDIO WiFi card) we had a patch ("CHROMIUM: sdio:
Defer SDIO interrupt handling until after resume") [2]. Presumably
this is the same problem that was solved by that patch.
[1] https://lkml.kernel.org/r/20190404040106.40519-1-dianders@chromium.org
[2] https://crrev.com/c/230765
Cc: <stable(a)vger.kernel.org> # 4.14.x
Signed-off-by: Douglas Anderson <dianders(a)chromium.org>
---
I didn't put any "Fixes" tag here, but presumably this could be
backported to whichever kernels folks found it useful for. I have at
least confirmed that kernels v4.14 and v4.19 (as well as v5.1-rc2)
show the problem. It is very easy to pick this to v4.19 and it
definitely fixes the problem there.
I haven't spent the time to pick this to 4.14 myself, but presumably
it wouldn't be too hard to backport this as far as v4.13 since that
contains commit 32dba73772f8 ("mmc: dw_mmc: Convert to use
MMC_CAP2_SDIO_IRQ_NOTHREAD for SDIO IRQs"). Prior to that it might
make sense for anyone experiencing this problem to just pick the old
CHROMIUM patch to fix them.
Changes in v2:
- Suggested 4.14+ in the stable tag (Sasha-bot)
- Extra note that this is a noop on non-SDIO (Shawn / Emil)
- Make boolean logic cleaner as per https://crrev.com/c/1586207/1
- Hopefully clear comments as per https://crrev.com/c/1586207/1
drivers/mmc/host/dw_mmc.c | 27 +++++++++++++++++++++++----
drivers/mmc/host/dw_mmc.h | 3 +++
2 files changed, 26 insertions(+), 4 deletions(-)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 80dc2fd6576c..480067b87a94 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1664,7 +1664,8 @@ static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
}
}
-static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
+static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, bool enb,
+ bool client_requested)
{
struct dw_mci *host = slot->host;
unsigned long irqflags;
@@ -1672,6 +1673,20 @@ static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
spin_lock_irqsave(&host->irq_lock, irqflags);
+ /*
+ * If we're being called directly from dw_mci_enable_sdio_irq()
+ * (which means that the client driver actually wants to enable or
+ * disable interrupts) then save the request. Otherwise this
+ * wasn't directly requested by the client and we should logically
+ * AND it with the client request since we want to disable if
+ * _either_ the client disabled OR we have some other reason to
+ * disable temporarily.
+ */
+ if (client_requested)
+ host->client_sdio_enb = enb;
+ else
+ enb &= host->client_sdio_enb;
+
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
if (enb)
@@ -1688,7 +1703,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
- __dw_mci_enable_sdio_irq(slot, enb);
+ __dw_mci_enable_sdio_irq(slot, enb, true);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
if (enb)
@@ -1701,7 +1716,7 @@ static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
- __dw_mci_enable_sdio_irq(slot, 1);
+ __dw_mci_enable_sdio_irq(slot, true, false);
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
@@ -2734,7 +2749,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
mci_writel(host, RINTSTS,
SDMMC_INT_SDIO(slot->sdio_id));
- __dw_mci_enable_sdio_irq(slot, 0);
+ __dw_mci_enable_sdio_irq(slot, false, false);
sdio_signal_irq(slot->mmc);
}
@@ -3424,6 +3439,8 @@ int dw_mci_runtime_suspend(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
+ __dw_mci_enable_sdio_irq(host->slot, false, false);
+
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
@@ -3490,6 +3507,8 @@ int dw_mci_runtime_resume(struct device *dev)
/* Now that slots are all setup, we can enable card detect */
dw_mci_enable_cd(host);
+ __dw_mci_enable_sdio_irq(host->slot, true, false);
+
return 0;
err:
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 46e9f8ec5398..dfbace0f5043 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -127,6 +127,7 @@ struct dw_mci_dma_slave {
* @cmd11_timer: Timer for SD3.0 voltage switch over scheme.
* @cto_timer: Timer for broken command transfer over scheme.
* @dto_timer: Timer for broken data transfer over scheme.
+ * @client_sdio_enb: The value last passed to enable_sdio_irq.
*
* Locking
* =======
@@ -234,6 +235,8 @@ struct dw_mci {
struct timer_list cmd11_timer;
struct timer_list cto_timer;
struct timer_list dto_timer;
+
+ bool client_sdio_enb;
};
/* DMA ops for Internal/External DMAC interface */
--
2.21.0.593.g511ec345e18-goog
I'm looking at CVE-2015-8553 which is fixed by:
commit 7681f31ec9cdacab4fd10570be924f2cef6669ba
Author: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
Date: Wed Feb 13 18:21:31 2019 -0500
xen/pciback: Don't disable PCI_COMMAND on PCI device reset.
I'm aware that this change is incompatible with qemu < 2.5, but that's
now quite old. Do you think it makes sense to apply this change to
some stable branches?
Ben.
--
Ben Hutchings, Software Developer Codethink Ltd
https://www.codethink.co.uk/ Dale House, 35 Dale Street
Manchester, M1 2HF, United Kingdom
Commit 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in
/proc/PID/stat") stopped reporting eip/esp and commit fd7d56270b52
("fs/proc: Report eip/esp in /prod/PID/stat for coredumping")
reintroduced the feature to fix a regression with userspace core dump
handlers (such as minicoredumper).
Because PF_DUMPCORE is only set for the primary thread, this didn't fix
the original problem for secondary threads. Allow reporting the eip/esp
for all threads by checking for PF_EXITING as well. This is set for all
the other threads when they are killed. coredump_wait() waits for all
the tasks to become inactive before proceeding to invoke a core dumper.
Fixes: fd7d56270b526ca3 ("fs/proc: Report eip/esp in /prod/PID/stat for coredumping")
Reported-by: Jan Luebbe <jlu(a)pengutronix.de>
Signed-off-by: John Ogness <john.ogness(a)linutronix.de>
---
This is a rework of Jan's v1 patch that allows accessing eip/esp of all
the threads without risk of the task still executing on a CPU.
The code changes are the same as in v2. With v3 I included a "Fixes" tag,
fixed a typo in the commit message, and Cc'd stable.
fs/proc/array.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2edbb657f859..55180501b915 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -462,7 +462,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
* a program is not able to use ptrace(2) in that case. It is
* safe because the task has stopped executing permanently.
*/
- if (permitted && (task->flags & PF_DUMPCORE)) {
+ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
if (try_get_task_stack(task)) {
eip = KSTK_EIP(task);
esp = KSTK_ESP(task);
--
2.11.0
This is a note to let you know that I've just added the patch titled
xhci: Convert xhci_handshake() to use readl_poll_timeout_atomic()
to my usb git tree which can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
in the usb-linus branch.
The patch will show up in the next release of the linux-next tree
(usually sometime within the next 24 hours during the week.)
The patch will hopefully also be merged in Linus's tree for the
next -rc kernel release.
If you have any questions about this process, please let me know.
From f7fac17ca925faa03fc5eb854c081a24075f8bad Mon Sep 17 00:00:00 2001
From: Andrey Smirnov <andrew.smirnov(a)gmail.com>
Date: Wed, 22 May 2019 14:34:01 +0300
Subject: xhci: Convert xhci_handshake() to use readl_poll_timeout_atomic()
xhci_handshake() implements the algorithm already captured by
readl_poll_timeout_atomic(). Convert the former to use the latter to
avoid repetition.
It turned out that this patch also fixes a bug on the AMD Stoneyridge
platform, where usleep(1) sometimes takes over 10ms.
This means a 5-second timeout can easily take over 15 seconds, which will
trigger the watchdog and reboot the system.
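For reference, readl_poll_timeout_atomic(addr, val, cond, delay_us,
timeout_us) behaves roughly like the open-coded loop below (a simplified
model of the <linux/iopoll.h> helper, not its exact implementation, which
tracks elapsed time with ktime):

	u64 remaining = timeout_us;

	for (;;) {
		val = readl(addr);
		if (cond)
			break;
		if (remaining < delay_us) {
			val = readl(addr);	/* one final read after timing out */
			break;
		}
		udelay(delay_us);
		remaining -= delay_us;
	}
	return (cond) ? 0 : -ETIMEDOUT;

Passing "(result & mask) == done || result == U32_MAX" as the condition
therefore preserves both the handshake completion check and the card-removal
check of the original loop.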
[Add info about patch fixing a bug to commit message -Mathias]
Signed-off-by: Andrey Smirnov <andrew.smirnov(a)gmail.com>
Tested-by: Raul E Rangel <rrangel(a)chromium.org>
Reviewed-by: Raul E Rangel <rrangel(a)chromium.org>
Cc: <stable(a)vger.kernel.org>
Signed-off-by: Mathias Nyman <mathias.nyman(a)linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
---
drivers/usb/host/xhci.c | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 048a675bbc52..20db378a6012 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -9,6 +9,7 @@
*/
#include <linux/pci.h>
+#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
@@ -52,7 +53,6 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
return false;
}
-/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
@@ -69,18 +69,16 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
u32 result;
+ int ret;
- do {
- result = readl(ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay(1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
+ ret = readl_poll_timeout_atomic(ptr, result,
+ (result & mask) == done ||
+ result == U32_MAX,
+ 1, usec);
+ if (result == U32_MAX) /* card removed */
+ return -ENODEV;
+
+ return ret;
}
/*
--
2.21.0
In the case of a normal sync update, the preparation of framebuffers (be
it calling drm_atomic_helper_prepare_planes() or doing setups with
drm_framebuffer_get()) are performed in the new_state and the respective
cleanups are performed in the old_state.
In the case of async updates, the preparation is likewise done in the
new_state, but the cleanups are done in the new_state as well (because
updates are performed in place, i.e. on the current state).
The current code blocks async updates when the fb is changed, turning
async updates into sync updates, slowing down cursor updates and
introducing regressions in igt tests with errors of type:
"CRITICAL: completed 97 cursor updated in a period of 30 flips, we
expect to complete approximately 15360 updates, with the threshold set
at 7680"
Fb changes in async updates were prevented to avoid the following scenario:
- Async update, oldfb = NULL, newfb = fb1, prepare fb1, cleanup fb1
- Async update, oldfb = fb1, newfb = fb2, prepare fb2, cleanup fb2
- Non-async commit, oldfb = fb2, newfb = fb1, prepare fb1, cleanup fb2 (wrong)
Where we have a single call to prepare fb2 but a double cleanup call for fb2.
To solve the above problems, instead of blocking async fb changes, we
place the old framebuffer in the new_state object, so when the code
performs cleanups in the new_state it will cleanup the old_fb and we
will have the following scenario instead:
- Async update, oldfb = NULL, newfb = fb1, prepare fb1, no cleanup
- Async update, oldfb = fb1, newfb = fb2, prepare fb2, cleanup fb1
- Non-async commit, oldfb = fb2, newfb = fb1, prepare fb1, cleanup fb2
Where calls to prepare/cleanup are balanced.
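For a driver, the ownership transfer described above means its
atomic_async_update hook ends up doing something like this (hypothetical
driver prefix "foo"; a minimal sketch, not a template taken from the patch):

	static void foo_plane_atomic_async_update(struct drm_plane *plane,
						  struct drm_plane_state *new_state)
	{
		struct drm_plane_state *cur_state = plane->state;

		/* update the in-place state with the new values */
		cur_state->crtc_x = new_state->crtc_x;
		cur_state->crtc_y = new_state->crtc_y;
		cur_state->src_x = new_state->src_x;
		cur_state->src_y = new_state->src_y;

		/*
		 * Swap the fbs: plane->state takes ownership of the new fb,
		 * while new_state keeps the old fb, so the cleanup performed
		 * on new_state releases the right buffer.
		 */
		swap(cur_state->fb, new_state->fb);

		/* ... program the hardware with cur_state->fb ... */
	}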
Cc: <stable(a)vger.kernel.org> # v4.14+
Fixes: 25dc194b34dd ("drm: Block fb changes for async plane updates")
Suggested-by: Boris Brezillon <boris.brezillon(a)collabora.com>
Signed-off-by: Helen Koike <helen.koike(a)collabora.com>
Reviewed-by: Boris Brezillon <boris.brezillon(a)collabora.com>
Reviewed-by: Nicholas Kazlauskas <nicholas.kazlauskas(a)amd.com>
---
Hello,
I added a TODO in drm_atomic_helper_async_commit() regarding doing a
full state swap(). Boris and Nicholas, let me know if this is OK and if
I can keep your Reviewed-by tags.
As mentioned in the cover letter, I tested on almost all platforms with
igt plane_cursor_legacy and kms_cursor_legacy and didn't see any
regressions. But I couldn't test on MSM and AMD because I don't have
the hardware; I would appreciate it if anyone could help me test those.
Thanks!
Helen
Changes in v3:
- Add Reviewed-by tags
- Add TODO in drm_atomic_helper_async_commit()
Changes in v2:
- Change the order of the patch in the series, add this as the last one.
- Add documentation
- s/ballanced/balanced
drivers/gpu/drm/drm_atomic_helper.c | 22 ++++++++++++----------
include/drm/drm_modeset_helper_vtables.h | 5 +++++
2 files changed, 17 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2453678d1186..de5812c362b5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1608,15 +1608,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
- /*
- * FIXME: Since prepare_fb and cleanup_fb are always called on
- * the new_plane_state for async updates we need to block framebuffer
- * changes. This prevents use of a fb that's been cleaned up and
- * double cleanups from occuring.
- */
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
-
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
@@ -1647,6 +1638,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
* drm_atomic_async_check() succeeds. Async commits are not supposed to swap
* the states like normal sync commits, but just do in-place changes on the
* current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
*/
void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_atomic_state *state)
@@ -1657,6 +1650,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
int i;
for_each_new_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *new_fb = plane_state->fb;
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
funcs = plane->helper_private;
funcs->atomic_async_update(plane, plane_state);
@@ -1665,11 +1661,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
* plane->state in-place, make sure at least common
* properties have been properly updated.
*/
- WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+ WARN_ON_ONCE(plane->state->fb != new_fb);
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+ /*
+ * Make sure the FBs have been swapped so that cleanups in the
+ * new_state performs a cleanup in the old FB.
+ */
+ WARN_ON_ONCE(plane_state->fb != old_fb);
}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index cfb7be40bed7..ce582e8e8f2f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1174,6 +1174,11 @@ struct drm_plane_helper_funcs {
* current one with the new plane configurations in the new
* plane_state.
*
+ * Drivers should also swap the framebuffers between plane state
+ * and new_state. This is required because prepare and cleanup calls
+ * are performed on the new_state object, then to cleanup the old
+ * framebuffer, it needs to be placed inside the new_state object.
+ *
* FIXME:
* - It only works for single plane updates
* - Async Pageflips are not supported yet
--
2.20.1