Fix callers that previously skipped calling arch_sync_kernel_mappings() if an error occurred during a pgtable update. The call is still required to sync any pgtable updates that may have occurred prior to hitting the error condition.
These are theoretical bugs discovered during code review.
Cc: stable@vger.kernel.org
Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
Fixes: 0c95cba49255 ("mm: apply_to_pte_range warn and fail if a large pte is encountered")
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/memory.c  | 6 ++++--
 mm/vmalloc.c | 4 ++--
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 539c0f7c6d54..a15f7dd500ea 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3040,8 +3040,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(*pgd) && !create)
 			continue;
-		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
-			return -EINVAL;
+		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
+			err = -EINVAL;
+			break;
+		}
 		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
 			if (!create)
 				continue;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6111ce900ec4..68950b1824d0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -604,13 +604,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 			mask |= PGTBL_PGD_MODIFIED;
 		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 		if (err)
-			return err;
+			break;
 	} while (pgd++, addr = next, addr != end);

 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 		arch_sync_kernel_mappings(start, end);

-	return 0;
+	return err;
 }

 /*
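Both hunks apply the same pattern: on error, break out of the page-table walk instead of returning immediately, so that arch_sync_kernel_mappings() still runs for any levels that were already modified. A minimal standalone sketch of that pattern follows; it is not kernel code, and do_level(), sync_mappings() and LEVELS are hypothetical names used only for illustration.

/*
 * Sketch of the error-handling pattern: break out of the walk on failure
 * so the sync step still covers levels modified before the error.
 */
#include <stdio.h>

#define LEVELS 4

static int do_level(int i, unsigned int *mask)
{
	*mask |= 1u << i;		/* record that this level was modified */
	return (i == 2) ? -1 : 0;	/* simulate a failure at level 2 */
}

static void sync_mappings(unsigned int mask)
{
	printf("syncing modified levels, mask=0x%x\n", mask);
}

int main(void)
{
	unsigned int mask = 0;
	int i, err = 0;

	for (i = 0; i < LEVELS; i++) {
		err = do_level(i, &mask);
		if (err)
			break;		/* was effectively "return err" before the fix */
	}

	if (mask)
		sync_mappings(mask);	/* still reached on the error path */

	return err ? 1 : 0;
}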
On 2/5/25 20:39, Ryan Roberts wrote:
> Fix callers that previously skipped calling arch_sync_kernel_mappings() if an error occurred during a pgtable update. The call is still required to sync any pgtable updates that may have occurred prior to hitting the error condition.
>
> These are theoretical bugs discovered during code review.
>
> Cc: stable@vger.kernel.org
> Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
> Fixes: 0c95cba49255 ("mm: apply_to_pte_range warn and fail if a large pte is encountered")
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
This change could stand on its own and LGTM.
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
> ---
>  mm/memory.c  | 6 ++++--
>  mm/vmalloc.c | 4 ++--
>  2 files changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 539c0f7c6d54..a15f7dd500ea 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3040,8 +3040,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
>  		next = pgd_addr_end(addr, end);
>  		if (pgd_none(*pgd) && !create)
>  			continue;
> -		if (WARN_ON_ONCE(pgd_leaf(*pgd)))
> -			return -EINVAL;
> +		if (WARN_ON_ONCE(pgd_leaf(*pgd))) {
> +			err = -EINVAL;
> +			break;
> +		}
>  		if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
>  			if (!create)
>  				continue;
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 6111ce900ec4..68950b1824d0 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -604,13 +604,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
>  			mask |= PGTBL_PGD_MODIFIED;
>  		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
>  		if (err)
> -			return err;
> +			break;
>  	} while (pgd++, addr = next, addr != end);
>
>  	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
>  		arch_sync_kernel_mappings(start, end);
>
> -	return 0;
> +	return err;
>  }
>
>  /*