The patch titled
Subject: mm/khugepaged: fix crashes due to misaccounted holes
has been added to the -mm tree. Its filename is
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-khugepaged-fix-crashes-due-to-m…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-khugepaged-fix-crashes-due-to-m…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Hugh Dickins <hughd(a)google.com>
Subject: mm/khugepaged: fix crashes due to misaccounted holes
Huge tmpfs testing on a shortish file mapped into a pmd-rounded extent hit
shmem_evict_inode()'s WARN_ON(inode->i_blocks) followed by clear_inode()'s
BUG_ON(inode->i_data.nrpages) when the file was later closed and unlinked.
khugepaged's collapse_shmem() was forgetting to update mapping->nrpages on
the rollback path, after it had added but then needed to undo some holes.
There is indeed an irritating asymmetry between shmem_charge(), whose
callers want it to increment nrpages after successfully accounting blocks,
and shmem_uncharge(), when __delete_from_page_cache() already decremented
nrpages itself: oh well, just add a comment on that to them both.
And shmem_recalc_inode() is supposed to be called when the accounting is
expected to be in balance (so it can deduce from imbalance that reclaim
discarded some pages): so change shmem_charge() to update nrpages earlier
(though it's rare for the difference to matter at all).
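For context, the shape of the test described above can be sketched from
userspace: a shortish tmpfs file mapped into a pmd-rounded extent, touched
so khugepaged may try to collapse it, then closed and unlinked. A minimal
sketch, assuming a tmpfs mounted with huge pages enabled at the
hypothetical path /mnt/huge (error handling omitted for brevity):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define PMD_SIZE (2UL << 20)    /* 2MB pmd extent on x86_64 */

    int main(void)
    {
        /* shortish file: shorter than the pmd-rounded extent */
        int fd = open("/mnt/huge/f", O_CREAT | O_RDWR, 0600);

        ftruncate(fd, PMD_SIZE / 2);
        char *p = mmap(NULL, PMD_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
        madvise(p, PMD_SIZE, MADV_HUGEPAGE);
        memset(p, 1, PMD_SIZE / 2);    /* populate the in-file pages */
        sleep(60);    /* give khugepaged a chance to try a collapse */
        munmap(p, PMD_SIZE);
        close(fd);
        unlink("/mnt/huge/f");    /* eviction then checks i_blocks, nrpages */
        return 0;
    }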
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261523450.2275@eggly.anvils
Fixes: 800d8c63b2e98 ("shmem: add huge pages support")
Fixes: f3f0e1d2150b2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Cc: Jerome Glisse <jglisse(a)redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov(a)yandex-team.ru>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
--- a/mm/khugepaged.c~mm-khugepaged-fix-crashes-due-to-misaccounted-holes
+++ a/mm/khugepaged.c
@@ -1506,9 +1506,12 @@ xa_unlocked:
khugepaged_pages_collapsed++;
} else {
struct page *page;
+
/* Something went wrong: roll back page cache changes */
- shmem_uncharge(mapping->host, nr_none);
xas_lock_irq(&xas);
+ mapping->nrpages -= nr_none;
+ shmem_uncharge(mapping->host, nr_none);
+
xas_set(&xas, start);
xas_for_each(&xas, page, end - 1) {
page = list_first_entry_or_null(&pagelist,
--- a/mm/shmem.c~mm-khugepaged-fix-crashes-due-to-misaccounted-holes
+++ a/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, l
if (!shmem_inode_acct_block(inode, pages))
return false;
+ /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+ inode->i_mapping->nrpages += pages;
+
spin_lock_irqsave(&info->lock, flags);
info->alloced += pages;
inode->i_blocks += pages * BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock_irqrestore(&info->lock, flags);
- inode->i_mapping->nrpages += pages;
return true;
}
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode,
struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long flags;
+ /* nrpages adjustment done by __delete_from_page_cache() or caller */
+
spin_lock_irqsave(&info->lock, flags);
info->alloced -= pages;
inode->i_blocks -= pages * BLOCKS_PER_PAGE;
_
Patches currently in -mm which might be from hughd(a)google.com are
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
mm-khugepaged-collapse_shmem-without-freezing-new_page.patch
mm-khugepaged-collapse_shmem-do-not-crash-on-compound.patch
mm-khugepaged-fix-the-xas_create_range-error-path.patch
mm-put_and_wait_on_page_locked-while-page-is-migrated.patch
The patch titled
Subject: mm/khugepaged: collapse_shmem() stop if punched or truncated
has been added to the -mm tree. Its filename is
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-khugepaged-collapse_shmem-stop-…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-khugepaged-collapse_shmem-stop-…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Hugh Dickins <hughd(a)google.com>
Subject: mm/khugepaged: collapse_shmem() stop if punched or truncated
Huge tmpfs testing showed that although collapse_shmem() recognizes a
concurrently truncated or hole-punched page correctly, its handling of
holes was liable to refill an emptied extent. Add a check to stop that.
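The concurrent truncation or hole-punch that this check defends against
can be driven from userspace with fallocate(); a hedged sketch, with the
path and extent size illustrative only:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/mnt/huge/f", O_RDWR);

        /* Empty the extent while khugepaged may be collapsing it:
         * with this patch, collapse_shmem() bails out with
         * SCAN_TRUNCATED instead of refilling the hole. */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  0, 2UL << 20);
        close(fd);
        return 0;
    }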
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261522040.2275@eggly.anvils
Fixes: f3f0e1d2150b2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Reviewed-by: Matthew Wilcox <willy(a)infradead.org>
Cc: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Cc: Jerome Glisse <jglisse(a)redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov(a)yandex-team.ru>
Cc: <stable(a)vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
--- a/mm/khugepaged.c~mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated
+++ a/mm/khugepaged.c
@@ -1359,6 +1359,17 @@ static void collapse_shmem(struct mm_str
VM_BUG_ON(index != xas.xa_index);
if (!page) {
+ /*
+ * Stop if extent has been truncated or hole-punched,
+ * and is now completely empty.
+ */
+ if (index == start) {
+ if (!xas_next_entry(&xas, end - 1)) {
+ result = SCAN_TRUNCATED;
+ break;
+ }
+ xas_set(&xas, index);
+ }
if (!shmem_charge(mapping->host, 1)) {
result = SCAN_FAIL;
break;
_
Patches currently in -mm which might be from hughd(a)google.com are
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
mm-khugepaged-collapse_shmem-without-freezing-new_page.patch
mm-khugepaged-collapse_shmem-do-not-crash-on-compound.patch
mm-khugepaged-fix-the-xas_create_range-error-path.patch
mm-put_and_wait_on_page_locked-while-page-is-migrated.patch
The patch titled
Subject: mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
has been added to the -mm tree. Its filename is
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-huge_memory-fix-lockdep-complai…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-huge_memory-fix-lockdep-complai…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Hugh Dickins <hughd(a)google.com>
Subject: mm/huge_memory: fix lockdep complaint on 32-bit i_size_read()
Huge tmpfs testing, on 32-bit kernel with lockdep enabled, showed that
__split_huge_page() was using i_size_read() while holding the irq-safe
lru_lock and page tree lock, but the 32-bit i_size_read() uses an
irq-unsafe seqlock which should not be nested inside them.
Instead, read the i_size earlier in split_huge_page_to_list(), and pass
the end offset down to __split_huge_page(): all while holding head page
lock, which is enough to prevent truncation of that extent before the page
tree lock has been taken.
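For readers wondering why only 32-bit is affected: i_size is a 64-bit
loff_t, which a 32-bit CPU cannot load atomically, so i_size_read() there
wraps the load in a seqcount retry loop whose write side is not irq-safe.
An illustrative userspace rendering of that pattern, a sketch of the idea
rather than the kernel's implementation:

    #include <stdatomic.h>
    #include <stdint.h>

    struct inode_size {
        atomic_uint seq;    /* odd while a writer is mid-update */
        uint64_t size;      /* would tear on 32-bit without the seqcount */
    };

    static uint64_t size_read(struct inode_size *i)
    {
        unsigned int seq;
        uint64_t v;

        do {
            while ((seq = atomic_load(&i->seq)) & 1)
                ;    /* busy-wait on the writer: this wait is why the
                      * read side must not nest inside irq-safe locks */
            v = i->size;
        } while (atomic_load(&i->seq) != seq);    /* retry if it moved */
        return v;
    }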
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261520070.2275@eggly.anvils
Fixes: baa355fd33142 ("thp: file pages support for split_huge_page()")
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Cc: Jerome Glisse <jglisse(a)redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov(a)yandex-team.ru>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
--- a/mm/huge_memory.c~mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read
+++ a/mm/huge_memory.c
@@ -2439,12 +2439,11 @@ static void __split_huge_page_tail(struc
}
static void __split_huge_page(struct page *page, struct list_head *list,
- unsigned long flags)
+ pgoff_t end, unsigned long flags)
{
struct page *head = compound_head(page);
struct zone *zone = page_zone(head);
struct lruvec *lruvec;
- pgoff_t end = -1;
int i;
lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2452,9 +2451,6 @@ static void __split_huge_page(struct pag
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(head);
- if (!PageAnon(page))
- end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond i_size: drop them from page cache */
@@ -2626,6 +2622,7 @@ int split_huge_page_to_list(struct page
int count, mapcount, extra_pins, ret;
bool mlocked;
unsigned long flags;
+ pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2648,6 +2645,7 @@ int split_huge_page_to_list(struct page
ret = -EBUSY;
goto out;
}
+ end = -1;
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
@@ -2661,6 +2659,15 @@ int split_huge_page_to_list(struct page
anon_vma = NULL;
i_mmap_lock_read(mapping);
+
+ /*
+ *__split_huge_page() may need to trim off pages beyond EOF:
+ * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+ * which cannot be nested inside the page tree lock. So note
+ * end now: i_size itself may be changed at any moment, but
+ * head page lock is good enough to serialize the trimming.
+ */
+ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
}
/*
@@ -2707,7 +2714,7 @@ int split_huge_page_to_list(struct page
if (mapping)
__dec_node_page_state(page, NR_SHMEM_THPS);
spin_unlock(&pgdata->split_queue_lock);
- __split_huge_page(page, list, flags);
+ __split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
_
Patches currently in -mm which might be from hughd(a)google.com are
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
mm-khugepaged-collapse_shmem-without-freezing-new_page.patch
mm-khugepaged-collapse_shmem-do-not-crash-on-compound.patch
mm-khugepaged-fix-the-xas_create_range-error-path.patch
mm-put_and_wait_on_page_locked-while-page-is-migrated.patch
The patch titled
Subject: mm/huge_memory: splitting set mapping+index before unfreeze
has been added to the -mm tree. Its filename is
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-huge_memory-splitting-set-mappi…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-huge_memory-splitting-set-mappi…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Hugh Dickins <hughd(a)google.com>
Subject: mm/huge_memory: splitting set mapping+index before unfreeze
Huge tmpfs stress testing has occasionally hit shmem_undo_range()'s
VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page).
Move the setting of mapping and index up before the page_ref_unfreeze() in
__split_huge_page_tail() to fix this: so that a page cache lookup cannot
get a reference while the tail's mapping and index are unstable.
In fact, we might as well move them up before the smp_wmb(): I don't see an
actual need for that, but if I'm missing something, this way round is
safer than the other, and no less efficient.
You might argue that VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page) is
misplaced, and should be left until after the trylock_page(); but left as
is, it has not crashed since, and gives more stringent assurance.
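The ordering rule being enforced, publish every field a page cache lookup
may read before letting the refcount become nonzero again, can be sketched
with C11 atomics; illustrative only, not the kernel code:

    #include <stdatomic.h>

    struct tail_page {
        void *mapping;
        unsigned long index;
        atomic_int refcount;    /* frozen at 0 for the span of the split */
    };

    static void unfreeze_tail(struct tail_page *t, void *mapping,
                              unsigned long index, int refs)
    {
        /* Make mapping and index stable first... */
        t->mapping = mapping;
        t->index = index;
        /* ...then publish: a lookup that now wins a reference can
         * only observe consistent fields. */
        atomic_store_explicit(&t->refcount, refs, memory_order_release);
    }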
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261516380.2275@eggly.anvils
Fixes: e9b61f19858a5 ("thp: reintroduce split_huge_page()")
Requires: 605ca5ede764 ("mm/huge_memory.c: reorder operations in __split_huge_page_tail()")
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Cc: Konstantin Khlebnikov <khlebnikov(a)yandex-team.ru>
Cc: Jerome Glisse <jglisse(a)redhat.com>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
--- a/mm/huge_memory.c~mm-huge_memory-splitting-set-mappingindex-before-unfreeze
+++ a/mm/huge_memory.c
@@ -2402,6 +2402,12 @@ static void __split_huge_page_tail(struc
(1L << PG_unevictable) |
(1L << PG_dirty)));
+ /* ->mapping in first tail page is compound_mapcount */
+ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+ page_tail);
+ page_tail->mapping = head->mapping;
+ page_tail->index = head->index + tail;
+
/* Page flags must be visible before we make the page non-compound. */
smp_wmb();
@@ -2422,12 +2428,6 @@ static void __split_huge_page_tail(struc
if (page_is_idle(head))
set_page_idle(page_tail);
- /* ->mapping in first tail page is compound_mapcount */
- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
- page_tail);
- page_tail->mapping = head->mapping;
-
- page_tail->index = head->index + tail;
page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
/*
_
Patches currently in -mm which might be from hughd(a)google.com are
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
mm-khugepaged-collapse_shmem-without-freezing-new_page.patch
mm-khugepaged-collapse_shmem-do-not-crash-on-compound.patch
mm-khugepaged-fix-the-xas_create_range-error-path.patch
mm-put_and_wait_on_page_locked-while-page-is-migrated.patch
The patch titled
Subject: mm/huge_memory: rename freeze_page() to unmap_page()
has been added to the -mm tree. Its filename is
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/mm-huge_memory-rename-freeze_page-…
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/mm-huge_memory-rename-freeze_page-…
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Hugh Dickins <hughd(a)google.com>
Subject: mm/huge_memory: rename freeze_page() to unmap_page()
The term "freeze" is used in several ways in the kernel, and in mm it has
the particular meaning of forcing page refcount temporarily to 0.
freeze_page() is just too confusing a name for a function that unmaps a
page: rename it unmap_page(), and rename unfreeze_page() to remap_page().
I went to change the mention of freeze_page() added later in mm/rmap.c, but
found it to be incorrect: ordinary page reclaim reaches there too; but the
substance of the comment still seems correct, so edit it down.
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261514080.2275@eggly.anvils
Fixes: e9b61f19858a5 ("thp: reintroduce split_huge_page()")
Signed-off-by: Hugh Dickins <hughd(a)google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov(a)linux.intel.com>
Cc: Jerome Glisse <jglisse(a)redhat.com>
Cc: Konstantin Khlebnikov <khlebnikov(a)yandex-team.ru>
Cc: Matthew Wilcox <willy(a)infradead.org>
Cc: <stable(a)vger.kernel.org> [4.8+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
--- a/mm/huge_memory.c~mm-huge_memory-rename-freeze_page-to-unmap_page
+++ a/mm/huge_memory.c
@@ -2350,7 +2350,7 @@ void vma_adjust_trans_huge(struct vm_are
}
}
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2365,7 +2365,7 @@ static void freeze_page(struct page *pag
VM_BUG_ON_PAGE(!unmap_success, page);
}
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
{
int i;
if (PageTransHuge(page)) {
@@ -2483,7 +2483,7 @@ static void __split_huge_page(struct pag
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
- unfreeze_page(head);
+ remap_page(head);
for (i = 0; i < HPAGE_PMD_NR; i++) {
struct page *subpage = head + i;
@@ -2664,7 +2664,7 @@ int split_huge_page_to_list(struct page
}
/*
- * Racy check if we can split the page, before freeze_page() will
+ * Racy check if we can split the page, before unmap_page() will
* split PMDs
*/
if (!can_split_huge_page(head, &extra_pins)) {
@@ -2673,7 +2673,7 @@ int split_huge_page_to_list(struct page
}
mlocked = PageMlocked(page);
- freeze_page(head);
+ unmap_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2727,7 +2727,7 @@ int split_huge_page_to_list(struct page
fail: if (mapping)
xa_unlock(&mapping->i_pages);
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
- unfreeze_page(head);
+ remap_page(head);
ret = -EBUSY;
}
--- a/mm/rmap.c~mm-huge_memory-rename-freeze_page-to-unmap_page
+++ a/mm/rmap.c
@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page
address + PAGE_SIZE);
} else {
/*
- * We should not need to notify here as we reach this
- * case only from freeze_page() itself only call from
- * split_huge_page_to_list() so everything below must
- * be true:
- * - page is not anonymous
- * - page is locked
- *
- * So as it is a locked file back page thus it can not
- * be remove from the page cache and replace by a new
- * page before mmu_notifier_invalidate_range_end so no
+ * This is a locked file-backed page, thus it cannot
+ * be removed from the page cache and replaced by a new
+ * page before mmu_notifier_invalidate_range_end, so no
* concurrent thread might update its page table to
* point at new page while a device still is using this
* page.
_
Patches currently in -mm which might be from hughd(a)google.com are
mm-huge_memory-rename-freeze_page-to-unmap_page.patch
mm-huge_memory-splitting-set-mappingindex-before-unfreeze.patch
mm-huge_memory-fix-lockdep-complaint-on-32-bit-i_size_read.patch
mm-khugepaged-collapse_shmem-stop-if-punched-or-truncated.patch
mm-khugepaged-fix-crashes-due-to-misaccounted-holes.patch
mm-khugepaged-collapse_shmem-remember-to-clear-holes.patch
mm-khugepaged-minor-reorderings-in-collapse_shmem.patch
mm-khugepaged-collapse_shmem-without-freezing-new_page.patch
mm-khugepaged-collapse_shmem-do-not-crash-on-compound.patch
mm-khugepaged-fix-the-xas_create_range-error-path.patch
mm-put_and_wait_on_page_locked-while-page-is-migrated.patch
From: Dexuan Cui <decui(a)microsoft.com>
We can concurrently try to open the same sub-channel from 2 paths:
path #1: vmbus_onoffer() -> vmbus_process_offer() -> handle_sc_creation().
path #2: storvsc_probe() -> storvsc_connect_to_vsp() ->
-> storvsc_channel_init() -> handle_multichannel_storage() ->
-> vmbus_are_subchannels_present() -> handle_sc_creation().
They conflict with each other, but it was not an issue before the recent
commit ae6935ed7d42 ("vmbus: split ring buffer allocation from open"),
because at the beginning of vmbus_open() we checked newchannel->state so
only one path could succeed, and the other would return with -EINVAL.
After ae6935ed7d42, the failing path frees the channel's ringbuffer by
vmbus_free_ring(), and this causes a panic later.
Commit ae6935ed7d42 itself is good, and it just reveals the longstanding
race. We can resolve the issue by removing path #2, i.e. removing the
second vmbus_are_subchannels_present() in handle_multichannel_storage().
BTW, the comment "Check to see if sub-channels have already been created"
in handle_multichannel_storage() is incorrect: when we unload the driver,
we first close the sub-channel(s) and then close the primary channel, next
the host sends rescind-offer message(s) so primary->sc_list will become
empty. This means the first vmbus_are_subchannels_present() in
handle_multichannel_storage() is never useful.
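The bug class here is a plain check-then-act race; a minimal pthread
sketch of the pattern (names hypothetical, and note the patch fixes it by
removing the second caller rather than by serializing them):

    #include <pthread.h>
    #include <stdio.h>

    enum { CLOSED, OPENED };
    static int state = CLOSED;    /* checked without any locking */

    /* Both threads can pass the check before either updates state, so
     * both proceed to open; the loser's rollback then frees a ring
     * buffer the winner is still using. */
    static void *open_channel(void *who)
    {
        if (state == CLOSED) {
            /* window: the other path can reach here too */
            state = OPENED;
            printf("%s opened the sub-channel\n", (const char *)who);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, open_channel, "path #1");
        pthread_create(&b, NULL, open_channel, "path #2");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }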
Fixes: ae6935ed7d42 ("vmbus: split ring buffer allocation from open")
Cc: stable(a)vger.kernel.org
Cc: Long Li <longli(a)microsoft.com>
Cc: Stephen Hemminger <sthemmin(a)microsoft.com>
Cc: K. Y. Srinivasan <kys(a)microsoft.com>
Cc: Haiyang Zhang <haiyangz(a)microsoft.com>
Signed-off-by: Dexuan Cui <decui(a)microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys(a)microsoft.com>
---
drivers/scsi/storvsc_drv.c | 61 +++++++++++++++++++-------------------
1 file changed, 30 insertions(+), 31 deletions(-)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f03dc03a42c3..8f88348ebe42 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -446,7 +446,6 @@ struct storvsc_device {
bool destroy;
bool drain_notify;
- bool open_sub_channel;
atomic_t num_outstanding_req;
struct Scsi_Host *host;
@@ -636,33 +635,38 @@ static inline struct storvsc_device *get_in_stor_device(
static void handle_sc_creation(struct vmbus_channel *new_sc)
{
struct hv_device *device = new_sc->primary_channel->device_obj;
+ struct device *dev = &device->device;
struct storvsc_device *stor_device;
struct vmstorage_channel_properties props;
+ int ret;
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
- if (stor_device->open_sub_channel == false)
- return;
-
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
- vmbus_open(new_sc,
- storvsc_ringbuffer_size,
- storvsc_ringbuffer_size,
- (void *)&props,
- sizeof(struct vmstorage_channel_properties),
- storvsc_on_channel_callback, new_sc);
+ ret = vmbus_open(new_sc,
+ storvsc_ringbuffer_size,
+ storvsc_ringbuffer_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, new_sc);
- if (new_sc->state == CHANNEL_OPENED_STATE) {
- stor_device->stor_chns[new_sc->target_cpu] = new_sc;
- cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+ /* In case vmbus_open() fails, we don't use the sub-channel. */
+ if (ret != 0) {
+ dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
+ return;
}
+
+ /* Add the sub-channel to the array of available channels. */
+ stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+ cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
}
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
+ struct device *dev = &device->device;
struct storvsc_device *stor_device;
int num_cpus = num_online_cpus();
int num_sc;
@@ -679,21 +683,11 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
request = &stor_device->init_request;
vstor_packet = &request->vstor_packet;
- stor_device->open_sub_channel = true;
/*
* Establish a handler for dealing with subchannels.
*/
vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
- /*
- * Check to see if sub-channels have already been created. This
- * can happen when this driver is re-loaded after unloading.
- */
-
- if (vmbus_are_subchannels_present(device->channel))
- return;
-
- stor_device->open_sub_channel = false;
/*
* Request the host to create sub-channels.
*/
@@ -710,23 +704,29 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
+ if (ret != 0) {
+ dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
return;
+ }
t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
- if (t == 0)
+ if (t == 0) {
+ dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
+ }
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ vstor_packet->status != 0) {
+ dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ vstor_packet->operation, vstor_packet->status);
return;
+ }
/*
- * Now that we created the sub-channels, invoke the check; this
- * may trigger the callback.
+ * We need to do nothing here, because vmbus_process_offer()
+ * invokes channel->sc_creation_callback, which will open and use
+ * the sub-channel(s).
*/
- stor_device->open_sub_channel = true;
- vmbus_are_subchannels_present(device->channel);
}
static void cache_wwn(struct storvsc_device *stor_device,
@@ -1794,7 +1794,6 @@ static int storvsc_probe(struct hv_device *device,
}
stor_device->destroy = false;
- stor_device->open_sub_channel = false;
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
--
2.19.1
The patch titled
Subject: zram: writeback throttle
has been added to the -mm tree. Its filename is
zram-writeback-throttle.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/zram-writeback-throttle.patch
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/zram-writeback-throttle.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Minchan Kim <minchan(a)kernel.org>
Subject: zram: writeback throttle
On small memory systems there are lots of write IOs, so if we use a flash
device as swap there would be serious flash wearout. To overcome this
problem, system developers need to design a write limitation strategy to
guarantee flash health for the entire product life.
This patch creates a new knob "writeback_limit" on zram. With that, if
the current writeback IO count (bd_writes in /sys/block/zramX/bd_stat)
exceeds the limit, zram stops further writeback until the admin resets
the limit.
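Assuming a configured zram0 with a backing device, setting and lifting the
limit might look like the following C helper (the sysfs path is per this
patch; the helper name is ours):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Write a value to writeback_limit: e.g. "100000" caps writeback
     * at 100000 4KB units; "0" disables the limit again. */
    static int set_writeback_limit(const char *val)
    {
        int fd = open("/sys/block/zram0/writeback_limit", O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, val, strlen(val));
        close(fd);
        return n == (ssize_t)strlen(val) ? 0 : -1;
    }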
Link: http://lkml.kernel.org/r/20181127055429.251614-8-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan(a)kernel.org>
Cc: Joey Pabalinas <joeypabalinas(a)gmail.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work(a)gmail.com>
Cc: <stable(a)vger.kernel.org> [4.14+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
Documentation/ABI/testing/sysfs-block-zram | 9 +++
Documentation/blockdev/zram.txt | 2
drivers/block/zram/zram_drv.c | 47 ++++++++++++++++++-
drivers/block/zram/zram_drv.h | 2
4 files changed, 59 insertions(+), 1 deletion(-)
--- a/Documentation/ABI/testing/sysfs-block-zram~zram-writeback-throttle
+++ a/Documentation/ABI/testing/sysfs-block-zram
@@ -121,3 +121,12 @@ Description:
The bd_stat file is read-only and represents backing device's
statistics (bd_count, bd_reads, bd_writes) in a format
similar to block layer statistics file format.
+
+What: /sys/block/zram<id>/writeback_limit
+Date: November 2018
+Contact: Minchan Kim <minchan(a)kernel.org>
+Description:
+ The writeback_limit file is read-write and specifies the maximum
+ amount of writeback ZRAM can do. The limit could be changed
+ in run time and "0" means disable the limit.
+ No limit is the initial state.
--- a/Documentation/blockdev/zram.txt~zram-writeback-throttle
+++ a/Documentation/blockdev/zram.txt
@@ -164,6 +164,8 @@ reset WO trigger device r
mem_used_max WO reset the `mem_used_max' counter (see later)
mem_limit WO specifies the maximum amount of memory ZRAM can use
to store the compressed data
+writeback_limit WO specifies the maximum amount of write IO zram can
+ write out to backing device as 4KB unit
max_comp_streams RW the number of possible concurrent compress operations
comp_algorithm RW show and change the compression algorithm
compact WO trigger memory compaction
--- a/drivers/block/zram/zram_drv.c~zram-writeback-throttle
+++ a/drivers/block/zram/zram_drv.c
@@ -330,6 +330,40 @@ next:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+
+static ssize_t writeback_limit_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u64 val;
+ ssize_t ret = -EINVAL;
+
+ if (kstrtoull(buf, 10, &val))
+ return ret;
+
+ down_read(&zram->init_lock);
+ atomic64_set(&zram->stats.bd_wb_limit, val);
+ if (val == 0 || val > atomic64_read(&zram->stats.bd_writes))
+ zram->stop_writeback = false;
+ up_read(&zram->init_lock);
+ ret = len;
+
+ return ret;
+}
+
+static ssize_t writeback_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = atomic64_read(&zram->stats.bd_wb_limit);
+ up_read(&zram->init_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
static void reset_bdev(struct zram *zram)
{
struct block_device *bdev;
@@ -571,6 +605,7 @@ static ssize_t writeback_store(struct de
char mode_buf[8];
unsigned long mode = -1UL;
unsigned long blk_idx = 0;
+ u64 wb_count, wb_limit;
sz = strscpy(mode_buf, buf, sizeof(mode_buf));
if (sz <= 0)
@@ -612,6 +647,11 @@ static ssize_t writeback_store(struct de
bvec.bv_len = PAGE_SIZE;
bvec.bv_offset = 0;
+ if (zram->stop_writeback) {
+ ret = -EIO;
+ break;
+ }
+
if (!blk_idx) {
blk_idx = alloc_block_bdev(zram);
if (!blk_idx) {
@@ -670,7 +710,7 @@ static ssize_t writeback_store(struct de
continue;
}
- atomic64_inc(&zram->stats.bd_writes);
+ wb_count = atomic64_inc_return(&zram->stats.bd_writes);
/*
* We released zram_slot_lock so need to check if the slot was
* changed. If there is freeing for the slot, we can catch it
@@ -694,6 +734,9 @@ static ssize_t writeback_store(struct de
zram_set_element(zram, index, blk_idx);
blk_idx = 0;
atomic64_inc(&zram->stats.pages_stored);
+ wb_limit = atomic64_read(&zram->stats.bd_wb_limit);
+ if (wb_limit != 0 && wb_count >= wb_limit)
+ zram->stop_writeback = true;
next:
zram_slot_unlock(zram, index);
}
@@ -1767,6 +1810,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
+static DEVICE_ATTR_RW(writeback_limit);
#endif
static struct attribute *zram_disk_attrs[] = {
@@ -1782,6 +1826,7 @@ static struct attribute *zram_disk_attrs
#ifdef CONFIG_ZRAM_WRITEBACK
&dev_attr_backing_dev.attr,
&dev_attr_writeback.attr,
+ &dev_attr_writeback_limit.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
--- a/drivers/block/zram/zram_drv.h~zram-writeback-throttle
+++ a/drivers/block/zram/zram_drv.h
@@ -86,6 +86,7 @@ struct zram_stats {
atomic64_t bd_count; /* no. of pages in backing device */
atomic64_t bd_reads; /* no. of reads from backing device */
atomic64_t bd_writes; /* no. of writes from backing device */
+ atomic64_t bd_wb_limit; /* writeback limit of backing device */
#endif
};
@@ -113,6 +114,7 @@ struct zram {
*/
bool claim; /* Protected by bdev->bd_mutex */
struct file *backing_dev;
+ bool stop_writeback;
#ifdef CONFIG_ZRAM_WRITEBACK
struct block_device *bdev;
unsigned int old_block_size;
_
Patches currently in -mm which might be from minchan(a)kernel.org are
zram-fix-lockdep-warning-of-free-block-handling.patch
zram-fix-double-free-backing-device.patch
zram-refactoring-flags-and-writeback-stuff.patch
zram-introduce-zram_idle-flag.patch
zram-support-idle-huge-page-writeback.patch
zram-add-bd_stat-statistics.patch
zram-writeback-throttle.patch
The patch titled
Subject: zram: add bd_stat statistics
has been added to the -mm tree. Its filename is
zram-add-bd_stat-statistics.patch
This patch should soon appear at
http://ozlabs.org/~akpm/mmots/broken-out/zram-add-bd_stat-statistics.patch
and later at
http://ozlabs.org/~akpm/mmotm/broken-out/zram-add-bd_stat-statistics.patch
Before you just go and hit "reply", please:
a) Consider who else should be cc'ed
b) Prefer to cc a suitable mailing list as well
c) Ideally: find the original patch on the mailing list and do a
reply-to-all to that, adding suitable additional cc's
*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***
The -mm tree is included into linux-next and is updated
there every 3-4 working days
------------------------------------------------------
From: Minchan Kim <minchan(a)kernel.org>
Subject: zram: add bd_stat statistics
bd_stat represents things that happened in the backing device. Currently
it supports bd_count, bd_reads and bd_writes, which are helpful for
understanding flash wearout and memory saving.
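Per the documentation added below, the file is a single line of three
whitespace-separated counters, so parsing it is straightforward; a small
sketch, with the device name illustrative:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bd_count, bd_reads, bd_writes;
        FILE *f = fopen("/sys/block/zram0/bd_stat", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%" SCNu64 " %" SCNu64 " %" SCNu64,
                   &bd_count, &bd_reads, &bd_writes) == 3)
            printf("bd_count=%" PRIu64 " bd_reads=%" PRIu64
                   " bd_writes=%" PRIu64 "\n",
                   bd_count, bd_reads, bd_writes);
        fclose(f);
        return 0;
    }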
Link: http://lkml.kernel.org/r/20181127055429.251614-7-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan(a)kernel.org>
Cc: Joey Pabalinas <joeypabalinas(a)gmail.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work(a)gmail.com>
Cc: <stable(a)vger.kernel.org> [4.14+]
Signed-off-by: Andrew Morton <akpm(a)linux-foundation.org>
---
Documentation/ABI/testing/sysfs-block-zram | 8 +++++
Documentation/blockdev/zram.txt | 11 +++++++
drivers/block/zram/zram_drv.c | 29 +++++++++++++++++++
drivers/block/zram/zram_drv.h | 5 +++
4 files changed, 53 insertions(+)
--- a/Documentation/ABI/testing/sysfs-block-zram~zram-add-bd_stat-statistics
+++ a/Documentation/ABI/testing/sysfs-block-zram
@@ -113,3 +113,11 @@ Contact: Minchan Kim <minchan(a)kernel.org
Description:
The writeback file is write-only and trigger idle and/or
huge page writeback to backing device.
+
+What: /sys/block/zram<id>/bd_stat
+Date: November 2018
+Contact: Minchan Kim <minchan(a)kernel.org>
+Description:
+ The bd_stat file is read-only and represents backing device's
+ statistics (bd_count, bd_reads, bd_writes) in a format
+ similar to block layer statistics file format.
--- a/Documentation/blockdev/zram.txt~zram-add-bd_stat-statistics
+++ a/Documentation/blockdev/zram.txt
@@ -221,6 +221,17 @@ line of text and contains the following
pages_compacted the number of pages freed during compaction
huge_pages the number of incompressible pages
+File /sys/block/zram<id>/bd_stat
+
+The stat file represents device's backing device statistics. It consists of
+a single line of text and contains the following stats separated by whitespace:
+ bd_count size of data written in backing device.
+ Unit: 4K bytes
+ bd_reads the number of reads from backing device
+ Unit: 4K bytes
+ bd_writes the number of writes to backing device
+ Unit: 4K bytes
+
9) Deactivate:
swapoff /dev/zram0
umount /dev/zram1
--- a/drivers/block/zram/zram_drv.c~zram-add-bd_stat-statistics
+++ a/drivers/block/zram/zram_drv.c
@@ -502,6 +502,7 @@ retry:
if (test_and_set_bit(blk_idx, zram->bitmap))
goto retry;
+ atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
@@ -511,6 +512,7 @@ static void free_block_bdev(struct zram
was_set = test_and_clear_bit(blk_idx, zram->bitmap);
WARN_ON_ONCE(!was_set);
+ atomic64_dec(&zram->stats.bd_count);
}
static void zram_page_end_io(struct bio *bio)
@@ -668,6 +670,7 @@ static ssize_t writeback_store(struct de
continue;
}
+ atomic64_inc(&zram->stats.bd_writes);
/*
* We released zram_slot_lock so need to check if the slot was
* changed. If there is freeing for the slot, we can catch it
@@ -757,6 +760,7 @@ static int read_from_bdev_sync(struct zr
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
unsigned long entry, struct bio *parent, bool sync)
{
+ atomic64_inc(&zram->stats.bd_reads);
if (sync)
return read_from_bdev_sync(zram, bvec, entry, parent);
else
@@ -1013,6 +1017,25 @@ static ssize_t mm_stat_show(struct devic
return ret;
}
+#ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t bd_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "%8llu %8llu %8llu\n",
+ (u64)atomic64_read(&zram->stats.bd_count),
+ (u64)atomic64_read(&zram->stats.bd_reads),
+ (u64)atomic64_read(&zram->stats.bd_writes));
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+#endif
+
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1033,6 +1056,9 @@ static ssize_t debug_stat_show(struct de
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RO(bd_stat);
+#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
@@ -1759,6 +1785,9 @@ static struct attribute *zram_disk_attrs
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_bd_stat.attr,
+#endif
&dev_attr_debug_stat.attr,
NULL,
};
--- a/drivers/block/zram/zram_drv.h~zram-add-bd_stat-statistics
+++ a/drivers/block/zram/zram_drv.h
@@ -82,6 +82,11 @@ struct zram_stats {
atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ atomic64_t bd_count; /* no. of pages in backing device */
+ atomic64_t bd_reads; /* no. of reads from backing device */
+ atomic64_t bd_writes; /* no. of writes from backing device */
+#endif
};
struct zram {
_
Patches currently in -mm which might be from minchan(a)kernel.org are
zram-fix-lockdep-warning-of-free-block-handling.patch
zram-fix-double-free-backing-device.patch
zram-refactoring-flags-and-writeback-stuff.patch
zram-introduce-zram_idle-flag.patch
zram-support-idle-huge-page-writeback.patch
zram-add-bd_stat-statistics.patch
zram-writeback-throttle.patch