From: Jann Horn <jannh@google.com>
commit 2ba99c5e08812494bc57f319fb562f527d9bacd8 upstream.
Since commit 70cbc3cc78a99 ("mm: gup: fix the fast GUP race against THP collapse"), the lockless_pages_from_mm() fastpath rechecks the pmd_t to ensure that the page table was not removed by khugepaged in between.
However, lockless_pages_from_mm() still requires that the page table is not concurrently freed. Fix it by sending IPIs (if the architecture uses semi-RCU-style page table freeing) before freeing/reusing page tables.
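To make the ordering concrete, here is a minimal sketch of the synchronization this relies on (illustrative only, not code from this patch; walker_side() and freeing_side() are made-up names, and the walker stands in for the GUP-fast path on architectures that use IPI-based semi-RCU page table freeing):

/* Walker: stands in for GUP-fast (lockless_pages_from_mm()). */
static void walker_side(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Walk page tables here. With IRQs off, the IPI broadcast by
	 * tlb_remove_table_sync_one() cannot be handled on this CPU,
	 * so the freeing side cannot get past its wait below while we
	 * are still inside the walk.
	 */
	local_irq_restore(flags);
}

/* Freeing side: the pattern the khugepaged paths below now follow. */
static void freeing_side(struct mm_struct *mm, pgtable_t table)
{
	tlb_remove_table_sync_one();	/* returns after every CPU took the IPI */
	pte_free(mm, table);		/* no IRQs-off walker can still see it */
}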
Link: https://lkml.kernel.org/r/20221129154730.2274278-2-jannh@google.com
Link: https://lkml.kernel.org/r/20221128180252.1684965-2-jannh@google.com
Link: https://lkml.kernel.org/r/20221125213714.4115729-2-jannh@google.com
Fixes: ba76149f47d8 ("thp: khugepaged")
Signed-off-by: Jann Horn <jannh@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
[manual backport: two of the three places in khugepaged that can free
 ptes were refactored into a common helper between 5.15 and 6.0;
 TLB flushing was refactored between 5.4 and 5.10]
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/asm-generic/tlb.h | 4 ++++
 mm/khugepaged.c           | 3 +++
 mm/mmu_gather.c           | 5 +++++
 3 files changed, 12 insertions(+)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 268674c1d568..b06240b67199 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -190,12 +190,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 #define tlb_needs_table_invalidate() (true)
 #endif
 
+void tlb_remove_table_sync_one(void);
+
 #else
 
 #ifdef tlb_needs_table_invalidate
 #error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
 #endif
 
+static inline void tlb_remove_table_sync_one(void) { }
+
 #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
 
 #ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 55631cd73939..a8f2605cbd0d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1060,6 +1060,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
+	tlb_remove_table_sync_one();
 
 	spin_lock(pte_ptl);
 	isolated = __collapse_huge_page_isolate(vma, address, pte);
@@ -1407,6 +1408,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	/* step 4: collapse pmd */
 	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
 	mm_dec_nr_ptes(mm);
+	tlb_remove_table_sync_one();
 	pte_free(mm, pmd_pgtable(_pmd));
 
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
@@ -1494,6 +1496,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 				/* assume page table is clear */
 				_pmd = pmdp_collapse_flush(vma, addr, pmd);
 				mm_dec_nr_ptes(mm);
+				tlb_remove_table_sync_one();
 				pte_free(mm, pmd_pgtable(_pmd));
 			}
 			up_write(&mm->mmap_sem);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 7c1b8f67af7b..341aa036b03c 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -117,6 +117,11 @@ static void tlb_remove_table_smp_sync(void *arg)
 	/* Simply deliver the interrupt */
 }
 
+void tlb_remove_table_sync_one(void)
+{
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+}
+
 static void tlb_remove_table_one(void *table)
 {
 	/*