On Tue, Aug 22, 2023 at 01:54:55AM +0000, Joel Fernandes (Google) wrote:
> For the stack move happening in shift_arg_pages(), the move is happening
> within the same VMA which spans the old and new ranges.
> 
> In case the aligned address happens to fall within that VMA, allow such
> moves and don't abort the optimization.
> 
> In the mremap case, we cannot allow any such moves, as they would end up
> destroying some part of the mapping (either the source of the move, or
> part of the existing mapping). So just avoid it for mremap.
> 
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  fs/exec.c          |  2 +-
>  include/linux/mm.h |  2 +-
>  mm/mremap.c        | 29 +++++++++++++++--------------
>  3 files changed, 17 insertions(+), 16 deletions(-)
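
For anyone else following along, my reading of the shift_arg_pages() case
(schematic only, addresses invented, and quoting fs/exec.c from memory --
IIRC it expands the VMA via vma_expand() before the move):

	after expansion:      new_start            old_start          old_end
	                      |<-------------- one VMA -------------------->|
	move_page_tables():   PTEs copied from [old_start, old_end)
	                      down to new_start; the tail is unmapped later

So both the source and the destination of the move live inside a single
VMA by construction, which is what the for_stack exception below relies on.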
> diff --git a/fs/exec.c b/fs/exec.c
> index 1a827d55ba94..244925307958 100644
> --- a/fs/exec.c
> +++ b/fs/exec.c
> @@ -712,7 +712,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
>  	 * process cleanup to remove whatever mess we made.
>  	 */
>  	if (length != move_page_tables(vma, old_start,
> -				       vma, new_start, length, false))
> +				       vma, new_start, length, false, true))
>  		return -ENOMEM;
>  
>  	lru_add_drain();
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 406ab9ea818f..e635d1fc73b6 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2458,7 +2458,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
>  extern unsigned long move_page_tables(struct vm_area_struct *vma,
>  		unsigned long old_addr, struct vm_area_struct *new_vma,
>  		unsigned long new_addr, unsigned long len,
> -		bool need_rmap_locks);
> +		bool need_rmap_locks, bool for_stack);
It's a nit, but it'd be nice to not have 'mystery meat' booleans:
foo(bar, baz, true, false, true) always ends up being a pain to track down.

However, I think anything better than that (flags or wrapper functions)
might be too much noise here, so perhaps we can live with this!
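
If we ever did want to tidy it up, even a trivial wrapper would keep the
call sites self-describing, something like (sketch only, name invented
here, untested):

	static inline unsigned long move_page_tables_for_stack(struct vm_area_struct *vma,
			unsigned long old_addr, struct vm_area_struct *new_vma,
			unsigned long new_addr, unsigned long len,
			bool need_rmap_locks)
	{
		/* Same as move_page_tables(), with for_stack hardcoded to true. */
		return move_page_tables(vma, old_addr, new_vma, new_addr, len,
					need_rmap_locks, /* for_stack = */ true);
	}

But as said, probably not worth the churn for a single boolean.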
> diff --git a/mm/mremap.c b/mm/mremap.c
> index 035fbf542a8f..06baa13bd2c8 100644
> --- a/mm/mremap.c
> +++ b/mm/mremap.c
> @@ -490,12 +490,13 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
>  }
>  
>  /*
> - * A helper to check if a previous mapping exists. Required for
> - * move_page_tables() and realign_addr() to determine if a previous mapping
> - * exists before we can do realignment optimizations.
> + * A helper to check if aligning down is OK. The aligned address should fall
> + * on *no mapping*. For the stack moving down, that's a special move within
> + * the VMA that is created to span the source and destination of the move,
> + * so we make an exception for it.
>   */
>  static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
> -			    unsigned long mask)
> +			    unsigned long mask, bool for_stack)
>  {
>  	unsigned long addr_masked = addr_to_align & mask;
>  
> @@ -504,7 +505,7 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
>  	 * of the corresponding VMA, we can't align down or we will destroy part
>  	 * of the current mapping.
>  	 */
> -	if (vma->vm_start != addr_to_align)
> +	if (!for_stack && vma->vm_start != addr_to_align)
>  		return false;
I'm a little confused by this exception. Is it very specifically for the
shift_arg_pages() case, where we can assume we are safe to just discard
the lower portion of the stack?

Wouldn't the find_vma_intersection() line below fail in this case? I may
be missing something here :)
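
(For reference, the check I mean is the tail of can_align_down() as
introduced earlier in this series; quoting from memory, so roughly:)

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;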
>  	/*
> @@ -517,7 +518,7 @@ static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_ali
>  /* Opportunistically realign to specified boundary for faster copy. */
>  static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
>  			     unsigned long *new_addr, struct vm_area_struct *new_vma,
> -			     unsigned long mask)
> +			     unsigned long mask, bool for_stack)
>  {
>  	/* Skip if the addresses are already aligned. */
>  	if ((*old_addr & ~mask) == 0)
> @@ -528,8 +529,8 @@ static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old
>  		return;
>  
>  	/* Ensure realignment doesn't cause overlap with existing mappings. */
> -	if (!can_align_down(old_vma, *old_addr, mask) ||
> -	    !can_align_down(new_vma, *new_addr, mask))
> +	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
> +	    !can_align_down(new_vma, *new_addr, mask, for_stack))
>  		return;
>  
>  	*old_addr = *old_addr & mask;
> @@ -539,7 +540,7 @@ static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old
>  unsigned long move_page_tables(struct vm_area_struct *vma,
>  		unsigned long old_addr, struct vm_area_struct *new_vma,
>  		unsigned long new_addr, unsigned long len,
> -		bool need_rmap_locks)
> +		bool need_rmap_locks, bool for_stack)
>  {
>  	unsigned long extent, old_end;
>  	struct mmu_notifier_range range;
> @@ -559,9 +560,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
>  	 * If possible, realign addresses to PMD boundary for faster copy.
>  	 * Only realign if the mremap copying hits a PMD boundary.
>  	 */
> -	if ((vma != new_vma)
> -		&& (len >= PMD_SIZE - (old_addr & ~PMD_MASK)))
> -		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK);
> +	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
> +		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
> +				 for_stack);
>  
>  	flush_cache_range(vma, old_addr, old_end);
>  	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
> @@ -708,7 +709,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
>  	}
>  
>  	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
> -				     need_rmap_locks);
> +				     need_rmap_locks, false);
>  	if (moved_len < old_len) {
>  		err = -ENOMEM;
>  	} else if (vma->vm_ops && vma->vm_ops->mremap) {
> @@ -722,7 +723,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
>  		 * and then proceed to unmap new area instead of old.
>  		 */
>  		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
> -				 true);
> +				 true, false);
>  		vma = new_vma;
>  		old_len = new_len;
>  		old_addr = new_addr;
> -- 
> 2.42.0.rc1.204.g551eb34607-goog