Move clear_soft_dirty() and clear_soft_dirty_pmd() out of fs/proc/task_mmu.c into include/linux/mm_inline.h and rename them to check_soft_dirty() and check_soft_dirty_pmd(). The helpers now take a 'clear' argument and return whether the page is soft-dirty, clearing the soft-dirty bit only when 'clear' is true. The clear_refs path keeps its old behaviour by passing clear == true.
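For illustration only (not part of this patch), a caller that only wants to sample the soft-dirty state can pass clear == false. The sketch below assumes the generic pagewalk API (struct mm_walk pte_entry callback); the function name sample_soft_dirty_pte and the nr_dirty counter are hypothetical:

  /*
   * Hypothetical example, not added by this patch: count soft-dirty
   * ptes without clearing them by passing clear == false.
   */
  static int sample_soft_dirty_pte(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
  {
	unsigned long *nr_dirty = walk->private;

	/* Query only: the pte is left untouched when clear == false. */
	if (check_soft_dirty(walk->vma, addr, pte, false))
		(*nr_dirty)++;

	return 0;
  }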
Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
---
 fs/proc/task_mmu.c        | 84 +--------------------------------
 include/linux/mm_inline.h | 99 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 101 insertions(+), 82 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f8cd58846a28..94d5761cc369 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1076,86 +1076,6 @@ struct clear_refs_private {
 	enum clear_refs_types type;
 };
 
-#ifdef CONFIG_MEM_SOFT_DIRTY
-
-static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	struct page *page;
-
-	if (!pte_write(pte))
-		return false;
-	if (!is_cow_mapping(vma->vm_flags))
-		return false;
-	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
-		return false;
-	page = vm_normal_page(vma, addr, pte);
-	if (!page)
-		return false;
-	return page_maybe_dma_pinned(page);
-}
-
-static inline void clear_soft_dirty(struct vm_area_struct *vma,
-		unsigned long addr, pte_t *pte)
-{
-	/*
-	 * The soft-dirty tracker uses #PF-s to catch writes
-	 * to pages, so write-protect the pte as well. See the
-	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
-	 * of how soft-dirty works.
-	 */
-	pte_t ptent = *pte;
-
-	if (pte_present(ptent)) {
-		pte_t old_pte;
-
-		if (pte_is_pinned(vma, addr, ptent))
-			return;
-		old_pte = ptep_modify_prot_start(vma, addr, pte);
-		ptent = pte_wrprotect(old_pte);
-		ptent = pte_clear_soft_dirty(ptent);
-		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
-	} else if (is_swap_pte(ptent)) {
-		ptent = pte_swp_clear_soft_dirty(ptent);
-		set_pte_at(vma->vm_mm, addr, pte, ptent);
-	}
-}
-#else
-static inline void clear_soft_dirty(struct vm_area_struct *vma,
-		unsigned long addr, pte_t *pte)
-{
-}
-#endif
-
-#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
-		unsigned long addr, pmd_t *pmdp)
-{
-	pmd_t old, pmd = *pmdp;
-
-	if (pmd_present(pmd)) {
-		/* See comment in change_huge_pmd() */
-		old = pmdp_invalidate(vma, addr, pmdp);
-		if (pmd_dirty(old))
-			pmd = pmd_mkdirty(pmd);
-		if (pmd_young(old))
-			pmd = pmd_mkyoung(pmd);
-
-		pmd = pmd_wrprotect(pmd);
-		pmd = pmd_clear_soft_dirty(pmd);
-
-		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
-	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
-		pmd = pmd_swp_clear_soft_dirty(pmd);
-		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
-	}
-}
-#else
-static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
-		unsigned long addr, pmd_t *pmdp)
-{
-}
-#endif
-
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -1168,7 +1088,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
-			clear_soft_dirty_pmd(vma, addr, pmd);
+			check_soft_dirty_pmd(vma, addr, pmd, true);
 			goto out;
 		}
@@ -1194,7 +1114,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 		ptent = *pte;
 
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
-			clear_soft_dirty(vma, addr, pte);
+			check_soft_dirty(vma, addr, pte, true);
 			continue;
 		}
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 7b25b53c474a..65014c347a94 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -360,4 +360,103 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
 #endif
 }
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	struct page *page;
+
+	if (!pte_write(pte))
+		return false;
+	if (!is_cow_mapping(vma->vm_flags))
+		return false;
+	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
+		return false;
+	page = vm_normal_page(vma, addr, pte);
+	if (!page)
+		return false;
+	return page_maybe_dma_pinned(page);
+}
+
+static inline bool check_soft_dirty(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *pte, bool clear)
+{
+	/*
+	 * The soft-dirty tracker uses #PF-s to catch writes
+	 * to pages, so write-protect the pte as well. See the
+	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
+	 * of how soft-dirty works.
+	 */
+	pte_t ptent = *pte;
+	int dirty = 0;
+
+	if (pte_present(ptent)) {
+		pte_t old_pte;
+
+		dirty = pte_soft_dirty(ptent);
+
+		if (dirty && clear && !pte_is_pinned(vma, addr, ptent)) {
+			old_pte = ptep_modify_prot_start(vma, addr, pte);
+			ptent = pte_wrprotect(old_pte);
+			ptent = pte_clear_soft_dirty(ptent);
+			ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
+		}
+	} else if (is_swap_pte(ptent)) {
+		dirty = pte_swp_soft_dirty(ptent);
+
+		if (dirty && clear) {
+			ptent = pte_swp_clear_soft_dirty(ptent);
+			set_pte_at(vma->vm_mm, addr, pte, ptent);
+		}
+	}
+
+	return !!dirty;
+}
+#else
+static inline bool check_soft_dirty(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *pte, bool clear)
+{
+	return false;
+}
+#endif
+
+#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline bool check_soft_dirty_pmd(struct vm_area_struct *vma,
+					unsigned long addr, pmd_t *pmdp, bool clear)
+{
+	pmd_t old, pmd = *pmdp;
+	int dirty = 0;
+
+	if (pmd_present(pmd)) {
+		dirty = pmd_soft_dirty(pmd);
+		if (dirty && clear) {
+			/* See comment in change_huge_pmd() */
+			old = pmdp_invalidate(vma, addr, pmdp);
+			if (pmd_dirty(old))
+				pmd = pmd_mkdirty(pmd);
+			if (pmd_young(old))
+				pmd = pmd_mkyoung(pmd);
+
+			pmd = pmd_wrprotect(pmd);
+			pmd = pmd_clear_soft_dirty(pmd);
+
+			set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+		}
+	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
+		dirty = pmd_swp_soft_dirty(pmd);
+
+		if (dirty && clear) {
+			pmd = pmd_swp_clear_soft_dirty(pmd);
+			set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+		}
+	}
+	return !!dirty;
+}
+#else
+static inline bool check_soft_dirty_pmd(struct vm_area_struct *vma,
+					unsigned long addr, pmd_t *pmdp, bool clear)
+{
+	return false;
+}
+#endif
+
 #endif