* Vlastimil Babka vbabka@suse.cz [230929 06:08]:
On 9/27/23 18:07, Liam R. Howlett wrote:
When the calling function fails after the dup_anon_vma(), the duplication of the anon_vma is not being undone. Add the necessary unlink_anon_vmas() call to the error paths that are missing it.
This issue showed up during inspection of the error path in vma_merge() for an unrelated vma iterator issue.
Users may experience increased memory usage, which may be problematic as the failure would likely be caused by a low memory situation.
Fixes: d4af56c5c7c6 ("mm: start tracking VMAs with maple tree") Cc: stable@vger.kernel.org Cc: Jann Horn jannh@google.com Signed-off-by: Liam R. Howlett Liam.Howlett@oracle.com
mm/mmap.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c index b5bc4ca9bdc4..2f0ee489db8a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -587,7 +587,7 @@ static inline void vma_complete(struct vma_prepare *vp,
- Returns: 0 on success.
 */
 static inline int dup_anon_vma(struct vm_area_struct *dst,
-		struct vm_area_struct *src)
+		struct vm_area_struct *src, struct vm_area_struct **dup)
{ /* * Easily overlooked: when mprotect shifts the boundary, make sure the @@ -597,6 +597,7 @@ static inline int dup_anon_vma(struct vm_area_struct *dst, if (src->anon_vma && !dst->anon_vma) { vma_assert_write_locked(dst); dst->anon_vma = src->anon_vma;
+		*dup = dst;
 		return anon_vma_clone(dst, src);
So the code is simpler that way, and it seems the current callers are fine with it, but shouldn't we rather only assign *dup if the clone succeeds?
Fair point. I'll address this in v3.
} @@ -624,6 +625,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *next) {
+	struct vm_area_struct *anon_dup = NULL;
 	bool remove_next = false;
 	struct vma_prepare vp;
@@ -633,7 +635,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, remove_next = true; vma_start_write(next);
-		ret = dup_anon_vma(vma, next);
+		ret = dup_anon_vma(vma, next, &anon_dup);
 		if (ret)
 			return ret;
 	}
@@ -661,6 +663,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, return 0; nomem:
+	if (anon_dup)
+		unlink_anon_vmas(anon_dup);
+
 	return -ENOMEM;
} @@ -860,6 +864,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, { struct vm_area_struct *curr, *next, *res; struct vm_area_struct *vma, *adjust, *remove, *remove2;
+	struct vm_area_struct *anon_dup = NULL;
 	struct vma_prepare vp;
 	pgoff_t vma_pgoff;
 	int err = 0;
@@ -927,18 +932,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, vma_start_write(next); remove = next; /* case 1 */ vma_end = next->vm_end;
-		err = dup_anon_vma(prev, next);
+		err = dup_anon_vma(prev, next, &anon_dup);
 		if (curr) {				/* case 6 */
 			vma_start_write(curr);
 			remove = curr;
 			remove2 = next;
 			if (!next->anon_vma)
-				err = dup_anon_vma(prev, curr);
+				err = dup_anon_vma(prev, curr, &anon_dup);
 		}
 	} else if (merge_prev) {			/* case 2 */
 		if (curr) {
 			vma_start_write(curr);
-			err = dup_anon_vma(prev, curr);
+			err = dup_anon_vma(prev, curr, &anon_dup);
 			if (end == curr->vm_end) {	/* case 7 */
 				remove = curr;
 			} else {			/* case 5 */
@@ -954,7 +959,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, vma_end = addr; adjust = next; adj_start = -(prev->vm_end - addr);
-			err = dup_anon_vma(next, prev);
+			err = dup_anon_vma(next, prev, &anon_dup);
 		} else {
 			/*
 			 * Note that cases 3 and 8 are the ONLY ones where prev
@@ -1018,6 +1023,9 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, return res; prealloc_fail:
+	if (anon_dup)
+		unlink_anon_vmas(anon_dup);
+
anon_vma_fail: if (merge_prev) vma_next(vmi);