mm: mremap: downgrade mmap_sem to read when shrinking
Other than munmap, mremap might be used to shrink a memory mapping too. So, it may hold the write mmap_sem for a long time when shrinking a large mapping, as described in commit ("mm: mmap: zap pages with read mmap_sem in munmap"). The mremap() will not manipulate vmas anymore after the __do_munmap() call for the mapping-shrink use case, so it is safe to downgrade to the read mmap_sem. So, the same optimization, which downgrades mmap_sem to read for zapping pages, is also feasible and reasonable for this case. The period of holding the exclusive mmap_sem for shrinking a large mapping would be reduced significantly with this optimization. MREMAP_FIXED and MREMAP_MAYMOVE are more complicated to adapt to this optimization since they need to manipulate vmas after do_munmap(); downgrading mmap_sem there may create a race window. Simple mapping shrink is the low-hanging fruit, and together with munmap it may cover most cases of unmapping. [akpm@linux-foundation.org: tweak comment] [yang.shi@linux.alibaba.com: fix unsigned compare against 0 issue] Link: http://lkml.kernel.org/r/1538687672-17795-2-git-send-email-yang.shi@linux.alibaba.com Link: http://lkml.kernel.org/r/1538067582-60038-1-git-send-email-yang.shi@linux.alibaba.com Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Matthew Wilcox <willy@infradead.org> Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com> Cc: Colin Ian King <colin.king@canonical.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
3c0513243a
commit
85a06835f6
@ -2306,6 +2306,8 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
|
||||
unsigned long len, unsigned long prot, unsigned long flags,
|
||||
vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
|
||||
struct list_head *uf);
|
||||
extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
|
||||
struct list_head *uf, bool downgrade);
|
||||
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
|
||||
struct list_head *uf);
|
||||
|
||||
|
@ -2687,8 +2687,8 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
* work. This now handles partial unmappings.
|
||||
* Jeremy Fitzhardinge <jeremy@goop.org>
|
||||
*/
|
||||
static int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
|
||||
struct list_head *uf, bool downgrade)
|
||||
int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
|
||||
struct list_head *uf, bool downgrade)
|
||||
{
|
||||
unsigned long end;
|
||||
struct vm_area_struct *vma, *prev, *last;
|
||||
|
20
mm/mremap.c
20
mm/mremap.c
@ -521,6 +521,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
|
||||
unsigned long ret = -EINVAL;
|
||||
unsigned long charged = 0;
|
||||
bool locked = false;
|
||||
bool downgraded = false;
|
||||
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
|
||||
LIST_HEAD(uf_unmap_early);
|
||||
LIST_HEAD(uf_unmap);
|
||||
@ -557,12 +558,20 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
|
||||
/*
|
||||
* Always allow a shrinking remap: that just unmaps
|
||||
* the unnecessary pages..
|
||||
* do_munmap does all the needed commit accounting
|
||||
* __do_munmap does all the needed commit accounting, and
|
||||
* downgrades mmap_sem to read if so directed.
|
||||
*/
|
||||
if (old_len >= new_len) {
|
||||
ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
|
||||
if (ret && old_len != new_len)
|
||||
int retval;
|
||||
|
||||
retval = __do_munmap(mm, addr+new_len, old_len - new_len,
|
||||
&uf_unmap, true);
|
||||
if (retval < 0 && old_len != new_len) {
|
||||
ret = retval;
|
||||
goto out;
|
||||
/* Returning 1 indicates mmap_sem is downgraded to read. */
|
||||
} else if (retval == 1)
|
||||
downgraded = true;
|
||||
ret = addr;
|
||||
goto out;
|
||||
}
|
||||
@ -627,7 +636,10 @@ out:
|
||||
vm_unacct_memory(charged);
|
||||
locked = 0;
|
||||
}
|
||||
up_write(&current->mm->mmap_sem);
|
||||
if (downgraded)
|
||||
up_read(&current->mm->mmap_sem);
|
||||
else
|
||||
up_write(&current->mm->mmap_sem);
|
||||
if (locked && new_len > old_len)
|
||||
mm_populate(new_addr + old_len, new_len - old_len);
|
||||
userfaultfd_unmap_complete(mm, &uf_unmap_early);
|
||||
|
Loading…
Reference in New Issue
Block a user