Skip to content

Commit

Permalink
ipc/shm, mm: drop do_vma_munmap()
Browse files Browse the repository at this point in the history
The do_vma_munmap() wrapper existed for callers that didn't have a vma
iterator and needed to check the vma mseal status prior to calling the
underlying munmap().  All callers now use a vma iterator and since the
mseal check has been moved to do_vmi_align_munmap() and the vmas are
aligned, this function can just be called instead.

do_vmi_align_munmap() can no longer be static, since ipc/shm now uses it;
it is exported via the mm.h header.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Liam R. Howlett <[email protected]>
Reviewed-by: Lorenzo Stoakes <[email protected]>
Cc: Bert Karwatzki <[email protected]>
Cc: Jeff Xu <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Mark Brown <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Paul Moore <[email protected]>
Cc: Sidhartha Kumar <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
howlett authored and akpm00 committed Sep 4, 2024
1 parent 13d77e0 commit 63fc66f
Show file tree
Hide file tree
Showing 5 changed files with 20 additions and 43 deletions.
6 changes: 3 additions & 3 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -3287,14 +3287,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct mm_struct *mm, unsigned long start,
unsigned long end, struct list_head *uf, bool unlock);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

#ifdef CONFIG_MMU
extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct list_head *uf, bool unlock);
extern int __mm_populate(unsigned long addr, unsigned long len,
int ignore_errors);
static inline void mm_populate(unsigned long addr, unsigned long len)
Expand Down
8 changes: 4 additions & 4 deletions ipc/shm.c
Original file line number Diff line number Diff line change
Expand Up @@ -1778,8 +1778,8 @@ long ksys_shmdt(char __user *shmaddr)
*/
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
NULL, false);
do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
vma->vm_end, NULL, false);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
Expand All @@ -1803,8 +1803,8 @@ long ksys_shmdt(char __user *shmaddr)
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
(vma->vm_file == file)) {
do_vma_munmap(&vmi, vma, vma->vm_start, vma->vm_end,
NULL, false);
do_vmi_align_munmap(&vmi, vma, mm, vma->vm_start,
vma->vm_end, NULL, false);
}

vma = vma_next(&vmi);
Expand Down
33 changes: 6 additions & 27 deletions mm/mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -169,11 +169,12 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
* do_vma_munmap() will drop the lock on success, so update it
* before calling do_vma_munmap().
* do_vmi_align_munmap() will drop the lock on success, so
* update it before calling do_vmi_align_munmap().
*/
mm->brk = brk;
if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
/* unlock = */ true))
goto out;

goto success_unlocked;
Expand Down Expand Up @@ -1479,9 +1480,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
vma->vm_file = get_file(file);
/*
* call_mmap() may map PTE, so ensure there are no existing PTEs
* call the vm_ops close function if one exists.
* and call the vm_ops close function if one exists.
*/
vms_clean_up_area(&vms, &mas_detach, true);
vms_clean_up_area(&vms, &mas_detach);
error = call_mmap(file, vma);
if (error)
goto unmap_and_free_vma;
Expand Down Expand Up @@ -1744,28 +1745,6 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
return ret;
}

/*
 * do_vma_munmap() - Unmap a full or partial vma.
 * @vmi: The vma iterator pointing at the vma
 * @vma: The first vma to be munmapped
 * @start: The start of the address range to unmap
 * @end: The end of the address range to unmap
 * @uf: The userfaultfd list_head
 * @unlock: Drop the lock on success
 *
 * Unmaps a VMA mapping when the vma iterator is already in position.
 * Does not handle alignment.
 *
 * Return: 0 on success, and drops the lock if so directed; error on failure,
 * in which case the lock is still held.
 */
int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, struct list_head *uf,
bool unlock)
{
return do_vmi_align_munmap(vmi, vma, vma->vm_mm, start, end, uf, unlock);
}

/*
* do_brk_flags() - Increase the brk vma if the flags match.
* @vmi: The vma iterator
Expand Down
12 changes: 6 additions & 6 deletions mm/vma.c
Original file line number Diff line number Diff line change
Expand Up @@ -658,8 +658,8 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
*/
mas_set(mas_detach, 1);
lru_add_drain();
tlb_gather_mmu(&tlb, vms->mm);
update_hiwater_rss(vms->mm);
tlb_gather_mmu(&tlb, vms->vma->vm_mm);
update_hiwater_rss(vms->vma->vm_mm);
unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
vms->vma_count, mm_wr_locked);

Expand All @@ -672,14 +672,14 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
}

void vms_clean_up_area(struct vma_munmap_struct *vms,
struct ma_state *mas_detach, bool mm_wr_locked)
struct ma_state *mas_detach)
{
struct vm_area_struct *vma;

if (!vms->nr_pages)
return;

vms_clear_ptes(vms, mas_detach, mm_wr_locked);
vms_clear_ptes(vms, mas_detach, true);
mas_set(mas_detach, 0);
mas_for_each(mas_detach, vma, ULONG_MAX)
if (vma->vm_ops && vma->vm_ops->close)
Expand All @@ -702,7 +702,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
struct vm_area_struct *vma;
struct mm_struct *mm;

mm = vms->mm;
mm = current->mm;
mm->map_count -= vms->vma_count;
mm->locked_vm -= vms->locked_vm;
if (vms->unlock)
Expand Down Expand Up @@ -770,7 +770,7 @@ int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
* its limit temporarily, to help free resources as expected.
*/
if (vms->end < vms->vma->vm_end &&
vms->mm->map_count >= sysctl_max_map_count)
vms->vma->vm_mm->map_count >= sysctl_max_map_count)
goto map_count_exceeded;

/* Don't bother splitting the VMA if we can't unmap it anyway */
Expand Down
4 changes: 1 addition & 3 deletions mm/vma.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ struct unlink_vma_file_batch {
*/
struct vma_munmap_struct {
struct vma_iterator *vmi;
struct mm_struct *mm;
struct vm_area_struct *vma; /* The first vma to munmap */
struct vm_area_struct *prev; /* vma before the munmap area */
struct vm_area_struct *next; /* vma after the munmap area */
Expand Down Expand Up @@ -114,7 +113,6 @@ static inline void init_vma_munmap(struct vma_munmap_struct *vms,
unsigned long start, unsigned long end, struct list_head *uf,
bool unlock)
{
vms->mm = current->mm;
vms->vmi = vmi;
vms->vma = vma;
if (vma) {
Expand Down Expand Up @@ -142,7 +140,7 @@ void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
struct ma_state *mas_detach);

void vms_clean_up_area(struct vma_munmap_struct *vms,
struct ma_state *mas_detach, bool mm_wr_locked);
struct ma_state *mas_detach);

/*
* reattach_vmas() - Undo any munmap work and free resources
Expand Down

0 comments on commit 63fc66f

Please sign in to comment.