From: Kairui Song <kasong@tencent.com>
There are some problems with the code implementation of the THP fallback path. First, suitable_orders could be zero, and calling highest_order on a zero value returns an overflowed size. Second, the order-check loop updates the index on every iteration, which may leave the index aligned to a larger value even as the loop shrinks the order. Finally, it forgot to try order 0 after the final loop.
This is usually fine because shmem_add_to_page_cache ensures the shmem mapping is still sane, but it might cause many potential issues, such as allocating random folios into random positions in the mapping or returning -ENOMEM by accident. This triggered some strange userspace errors [1], and shouldn't have happened in the first place.
Link: https://lore.kernel.org/linux-mm/CAMgjq7DqgAmj25nDUwwu1U2cSGSn8n4-Hqpgottedy... [1]
Fixes: e7a2ab7b3bb5d ("mm: shmem: add mTHP support for anonymous shmem")
Cc: stable@vger.kernel.org
Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/shmem.c | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c index b50ce7dbc84a..25303711f123 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1824,6 +1824,9 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault unsigned long pages; int order;
+ if (!orders) + return 0; + if (vma) { orders = thp_vma_suitable_orders(vma, vmf->address, orders); if (!orders) @@ -1888,27 +1891,28 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) orders = 0;
- if (orders > 0) { - suitable_orders = shmem_suitable_orders(inode, vmf, - mapping, index, orders); + suitable_orders = shmem_suitable_orders(inode, vmf, + mapping, index, orders);
+ if (suitable_orders) { order = highest_order(suitable_orders); - while (suitable_orders) { + do { pages = 1UL << order; - index = round_down(index, pages); - folio = shmem_alloc_folio(gfp, order, info, index); - if (folio) + folio = shmem_alloc_folio(gfp, order, info, round_down(index, pages)); + if (folio) { + index = round_down(index, pages); goto allocated; + }
if (pages == HPAGE_PMD_NR) count_vm_event(THP_FILE_FALLBACK); count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK); order = next_order(&suitable_orders, order); - } - } else { - pages = 1; - folio = shmem_alloc_folio(gfp, 0, info, index); + } while (suitable_orders); } + + pages = 1; + folio = shmem_alloc_folio(gfp, 0, info, index); if (!folio) return ERR_PTR(-ENOMEM);