From f447ae0acd3901bc5bec183c1b66f10b8bfeff91 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Apr 2018 16:33:10 +0200 Subject: [PATCH 01/23] mm: prepare for FAULT_FLAG_SPECULATIVE When speculating faults (without holding mmap_sem) we need to validate that the vma against which we loaded pages is still valid when we're ready to install the new PTE. Therefore, replace the pte_offset_map_lock() calls that (re)take the PTL with pte_map_lock() which can fail in case we find the VMA changed since we started the fault. Signed-off-by: Peter Zijlstra (Intel) [Port to 4.12 kernel] [Remove the comment about the fault_env structure which has been implemented as the vm_fault structure in the kernel] [move pte_map_lock()'s definition upper in the file] [move the define of FAULT_FLAG_SPECULATIVE later in the series] [review error path in do_swap_page(), do_anonymous_page() and wp_page_copy()] Signed-off-by: Laurent Dufour Change-Id: Id6dfae130fbfdd4bb92aa6415d6f1db7ef833266 [vinmenon@codeaurora.org: fix trivial merge conflicts] Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:10 Signed-off-by: Vinayak Menon Signed-off-by: Charan Teja Reddy --- mm/memory.c | 88 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 59 insertions(+), 29 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 65a527f11b36..050fa9958e69 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2140,6 +2140,13 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL_GPL(apply_to_page_range); +static inline bool pte_map_lock(struct vm_fault *vmf) +{ + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + return true; +} + /* * handle_pte_fault chooses page fault handler according to an entry which was * read non-atomically. Before making any commitment, on those architectures @@ -2347,25 +2354,26 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) int page_copied = 0; struct mem_cgroup *memcg; struct mmu_notifier_range range; + int ret = VM_FAULT_OOM; if (unlikely(anon_vma_prepare(vma))) - goto oom; + goto out; if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { new_page = alloc_zeroed_user_highpage_movable(vma, vmf->address); if (!new_page) - goto oom; + goto out; } else { new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (!new_page) - goto oom; + goto out; cow_user_page(new_page, old_page, vmf->address, vma); } if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false)) - goto oom_free_new; + goto out_free_new; __SetPageUptodate(new_page); @@ -2377,7 +2385,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) /* * Re-check the pte - we dropped the lock */ - vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; + goto out_uncharge; + } if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { if (old_page) { if (!PageAnon(old_page)) { @@ -2464,12 +2475,14 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) put_page(old_page); } return page_copied ? 
VM_FAULT_WRITE : 0; -oom_free_new: +out_uncharge: + mem_cgroup_cancel_charge(new_page, memcg, false); +out_free_new: put_page(new_page); -oom: +out: if (old_page) put_page(old_page); - return VM_FAULT_OOM; + return ret; } /** @@ -2491,8 +2504,8 @@ oom: vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); - vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. @@ -2614,8 +2627,11 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) get_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); lock_page(vmf->page); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + unlock_page(vmf->page); + put_page(vmf->page); + return VM_FAULT_RETRY; + } if (!pte_same(*vmf->pte, vmf->orig_pte)) { unlock_page(vmf->page); pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -2826,11 +2842,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (!page) { /* - * Back out if somebody else faulted in this pte - * while we released the pte lock. + * Back out if the VMA has changed in our back during + * a speculative page fault or if somebody else + * faulted in this pte while we released the pte lock. */ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) { + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + ret = VM_FAULT_RETRY; + goto out; + } + if (likely(pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); @@ -2883,10 +2904,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } /* - * Back out if somebody else already faulted in this pte. + * Back out if the VMA has changed in our back during a speculative + * page fault or if somebody else already faulted in this pte. 
*/ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; + goto out_cancel_cgroup; + } if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) goto out_nomap; @@ -2964,8 +2988,9 @@ unlock: out: return ret; out_nomap: - mem_cgroup_cancel_charge(page, memcg, false); pte_unmap_unlock(vmf->pte, vmf->ptl); +out_cancel_cgroup: + mem_cgroup_cancel_charge(page, memcg, false); out_page: unlock_page(page); out_release: @@ -3016,8 +3041,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) !mm_forbids_zeropage(vma->vm_mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), vma->vm_page_prot)); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; if (!pte_none(*vmf->pte)) goto unlock; ret = check_stable_address_space(vma->vm_mm); @@ -3053,14 +3078,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); - if (!pte_none(*vmf->pte)) + if (!pte_map_lock(vmf)) { + ret = VM_FAULT_RETRY; goto release; + } + if (!pte_none(*vmf->pte)) + goto unlock_and_release; ret = check_stable_address_space(vma->vm_mm); if (ret) - goto release; + goto unlock_and_release; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { @@ -3082,10 +3109,12 @@ setpte: unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; +unlock_and_release: + pte_unmap_unlock(vmf->pte, vmf->ptl); release: mem_cgroup_cancel_charge(page, memcg, false); put_page(page); - goto unlock; + return ret; oom_free_page: put_page(page); oom: @@ -3200,8 +3229,9 @@ map_pte: * pte_none() under vmf->ptl protection when we return to * alloc_set_pte(). */ - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + if (!pte_map_lock(vmf)) + return VM_FAULT_RETRY; + return 0; } From 1d0d2c5e715af2dc30955778dda2dd2280631148 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:11 +0200 Subject: [PATCH 02/23] mm: introduce pte_spinlock for FAULT_FLAG_SPECULATIVE When handling page fault without holding the mmap_sem the fetch of the pte lock pointer and the locking will have to be done while ensuring that the VMA is not touched in our back. So move the fetch and locking operations in a dedicated function. Change-Id: If93ab95b1d22b7195e1c15b57315021f6be7c394 Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:11 Signed-off-by: Vinayak Menon --- mm/memory.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 050fa9958e69..cd9faf7cf7b0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2140,6 +2140,13 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL_GPL(apply_to_page_range); +static inline bool pte_spinlock(struct vm_fault *vmf) +{ + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + return true; +} + static inline bool pte_map_lock(struct vm_fault *vmf) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, @@ -3730,8 +3737,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * validation through pte_unmap_same(). It's of NUMA type but * the pfn may be screwed if the read is non atomic. 
*/ - vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); - spin_lock(vmf->ptl); + if (!pte_spinlock(vmf)) + return VM_FAULT_RETRY; if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; @@ -3924,8 +3931,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - spin_lock(vmf->ptl); + if (!pte_spinlock(vmf)) + return VM_FAULT_RETRY; entry = vmf->orig_pte; if (unlikely(!pte_same(*vmf->pte, entry))) goto unlock; From 7224f45453000022bb07762cbae3dbc46ff0b13c Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:12 +0200 Subject: [PATCH 03/23] mm: make pte_unmap_same compatible with SPF pte_unmap_same() is making the assumption that the page table are still around because the mmap_sem is held. This is no more the case when running a speculative page fault and additional check must be made to ensure that the final page table are still there. This is now done by calling pte_spinlock() to check for the VMA's consistency while locking for the page tables. This is requiring passing a vm_fault structure to pte_unmap_same() which is containing all the needed parameters. As pte_spinlock() may fail in the case of a speculative page fault, if the VMA has been touched in our back, pte_unmap_same() should now return 3 cases : 1. pte are the same (0) 2. pte are different (VM_FAULT_PTNOTSAME) 3. a VMA's changes has been detected (VM_FAULT_RETRY) The case 2 is handled by the introduction of a new VM_FAULT flag named VM_FAULT_PTNOTSAME which is then trapped in cow_user_page(). If VM_FAULT_RETRY is returned, it is passed up to the callers to retry the page fault while holding the mmap_sem. Change-Id: Iaccfa0d877334f4343f8b0ec3400af5070ff5864 Acked-by: David Rientjes Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:12 [vinmenon@codeaurora.org: trivial merge conflicts] [vinmenon@codeaurora.org: 5.4: moved PTNOTSAME to mm_types.h] Signed-off-by: Vinayak Menon Signed-off-by: Charan Teja Reddy --- include/linux/mm_types.h | 1 + mm/memory.c | 39 ++++++++++++++++++++++++++++----------- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 533d73c47c0f..80dde223ab78 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -701,6 +701,7 @@ enum vm_fault_reason { VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800, VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000, VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000, + VM_FAULT_PTNOTSAME = (__force vm_fault_t)0x004000, VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000, }; diff --git a/mm/memory.c b/mm/memory.c index cd9faf7cf7b0..b743b1c5ce50 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2161,21 +2161,29 @@ static inline bool pte_map_lock(struct vm_fault *vmf) * parts, do_swap_page must check under lock before unmapping the pte and * proceeding (but do_wp_page is only called after already making such a check; * and do_anonymous_page can safely check later on). + * + * pte_unmap_same() returns: + * 0 if the PTE are the same + * VM_FAULT_PTNOTSAME if the PTE are different + * VM_FAULT_RETRY if the VMA has changed in our back during + * a speculative page fault handling. 
*/ -static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, - pte_t *page_table, pte_t orig_pte) +static inline int pte_unmap_same(struct vm_fault *vmf) { - int same = 1; + int ret = 0; + #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) if (sizeof(pte_t) > sizeof(unsigned long)) { - spinlock_t *ptl = pte_lockptr(mm, pmd); - spin_lock(ptl); - same = pte_same(*page_table, orig_pte); - spin_unlock(ptl); + if (pte_spinlock(vmf)) { + if (!pte_same(*vmf->pte, vmf->orig_pte)) + ret = VM_FAULT_PTNOTSAME; + spin_unlock(vmf->ptl); + } else + ret = VM_FAULT_RETRY; } #endif - pte_unmap(page_table); - return same; + pte_unmap(vmf->pte); + return ret; } static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) @@ -2799,10 +2807,19 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) pte_t pte; int locked; int exclusive = 0; - vm_fault_t ret = 0; + vm_fault_t ret; - if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) + ret = pte_unmap_same(vmf); + if (ret) { + /* + * If pte != orig_pte, this means another thread did the + * swap operation in our back. + * So nothing else to do. + */ + if (ret == VM_FAULT_PTNOTSAME) + ret = 0; goto out; + } entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { From f9eb248e24b322e81da8fb834350bfdaff14b341 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:13 +0200 Subject: [PATCH 04/23] mm: introduce INIT_VMA() Some VMA struct fields need to be initialized once the VMA structure is allocated. Currently this only concerns anon_vma_chain field but some other will be added to support the speculative page fault. Instead of spreading the initialization calls all over the code, let's introduce a dedicated inline function. 
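For reference, the helper this patch introduces ends up looking as follows once the next patch in the series ("mm: VMA sequence count") extends it; this is just the two hunks of the series consolidated into one view, not new code:

static inline void INIT_VMA(struct vm_area_struct *vma)
{
	/* the only field initialised by this patch */
	INIT_LIST_HEAD(&vma->anon_vma_chain);
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
	/* added by the following patch to support speculative faults */
	seqcount_init(&vma->vm_sequence);
#endif
}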
Change-Id: I9f6b29dc74055354318b548e2b6b22c37d4c61bb Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:13 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: merge conflict fixes] Signed-off-by: Charan Teja Reddy --- include/linux/mm.h | 7 ++++++- kernel/fork.c | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 15d93dff908e..16155e7351e4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -529,6 +529,11 @@ struct vm_operations_struct { unsigned long addr); }; +static inline void INIT_VMA(struct vm_area_struct *vma) +{ + INIT_LIST_HEAD(&vma->anon_vma_chain); +} + static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { static const struct vm_operations_struct dummy_vm_ops = {}; @@ -536,7 +541,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) memset(vma, 0, sizeof(*vma)); vma->vm_mm = mm; vma->vm_ops = &dummy_vm_ops; - INIT_LIST_HEAD(&vma->anon_vma_chain); + INIT_VMA(vma); } static inline void vma_set_anonymous(struct vm_area_struct *vma) diff --git a/kernel/fork.c b/kernel/fork.c index f89e8c85f2b8..33496e0cdcff 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -359,7 +359,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) if (new) { *new = *orig; - INIT_LIST_HEAD(&new->anon_vma_chain); + INIT_VMA(new); } return new; } From 662540931e57cbfac485c1ba3c9363e15574be1b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Apr 2018 16:33:14 +0200 Subject: [PATCH 05/23] mm: VMA sequence count Wrap the VMA modifications (vma_adjust/unmap_page_range) with sequence counts such that we can easily test if a VMA is changed. The unmap_page_range() one allows us to make assumptions about page-tables; when we find the seqcount hasn't changed we can assume page-tables are still valid. The flip side is that we cannot distinguish between a vma_adjust() and the unmap_page_range() -- where with the former we could have re-checked the vma bounds against the address. 
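To make the intent concrete, the read side that this counter enables looks roughly like the sketch below. This is only an illustration of the classic seqcount pattern; the actual speculative reader is introduced later in the series, and vma_unchanged_example() is an invented name. The idea: sample the counter, do the speculative work, then re-check that neither vma_adjust() nor unmap_page_range() ran in between.

static bool vma_unchanged_example(struct vm_area_struct *vma)
{
	unsigned int seq;

	/* an odd value means a writer is inside vm_write_begin()/end() */
	seq = raw_read_seqcount(&vma->vm_sequence);
	if (seq & 1)
		return false;

	/* ... speculative work against this VMA goes here ... */

	/* true only if no vma_adjust()/unmap_page_range() ran meanwhile */
	return !read_seqcount_retry(&vma->vm_sequence, seq);
}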
Signed-off-by: Peter Zijlstra (Intel) [Port to 4.12 kernel] [Build depends on CONFIG_SPECULATIVE_PAGE_FAULT] [Introduce vm_write_* inline function depending on CONFIG_SPECULATIVE_PAGE_FAULT] [Fix lock dependency between mapping->i_mmap_rwsem and vma->vm_sequence by using vm_raw_write* functions] [Fix a lock dependency warning in mmap_region() when entering the error path] [move sequence initialisation INIT_VMA()] Signed-off-by: Laurent Dufour Change-Id: Ibc23ef3b9dbb80323c0f24cb06da34b4c3a8fa71 Patch-mainline: linux-mm @ 17 Apr 2018 16:33:14 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- include/linux/mm.h | 44 ++++++++++++++++++++++++++++++++++++++++ include/linux/mm_types.h | 4 +++- mm/memory.c | 2 ++ mm/mmap.c | 31 ++++++++++++++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 16155e7351e4..d3b9371cae11 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -532,6 +532,9 @@ struct vm_operations_struct { static inline void INIT_VMA(struct vm_area_struct *vma) { INIT_LIST_HEAD(&vma->anon_vma_chain); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + seqcount_init(&vma->vm_sequence); +#endif } static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) @@ -1472,6 +1475,47 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address, int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +static inline void vm_write_begin(struct vm_area_struct *vma) +{ + write_seqcount_begin(&vma->vm_sequence); +} +static inline void vm_write_begin_nested(struct vm_area_struct *vma, + int subclass) +{ + write_seqcount_begin_nested(&vma->vm_sequence, subclass); +} +static inline void vm_write_end(struct vm_area_struct *vma) +{ + write_seqcount_end(&vma->vm_sequence); +} +static inline void vm_raw_write_begin(struct vm_area_struct *vma) +{ + raw_write_seqcount_begin(&vma->vm_sequence); +} +static inline void vm_raw_write_end(struct vm_area_struct *vma) +{ + raw_write_seqcount_end(&vma->vm_sequence); +} +#else +static inline void vm_write_begin(struct vm_area_struct *vma) +{ +} +static inline void vm_write_begin_nested(struct vm_area_struct *vma, + int subclass) +{ +} +static inline void vm_write_end(struct vm_area_struct *vma) +{ +} +static inline void vm_raw_write_begin(struct vm_area_struct *vma) +{ +} +static inline void vm_raw_write_end(struct vm_area_struct *vma) +{ +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 80dde223ab78..f010d19c2b18 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -361,7 +361,9 @@ struct vm_area_struct { struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; - +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + seqcount_t vm_sequence; +#endif ANDROID_KABI_RESERVE(1); ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); diff --git a/mm/memory.c b/mm/memory.c index b743b1c5ce50..eaf7f23b05f6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1247,6 +1247,7 @@ void unmap_page_range(struct mmu_gather *tlb, unsigned long next; BUG_ON(addr >= 
end); + vm_write_begin(vma); tlb_start_vma(tlb, vma); pgd = pgd_offset(vma->vm_mm, addr); do { @@ -1256,6 +1257,7 @@ void unmap_page_range(struct mmu_gather *tlb, next = zap_p4d_range(tlb, vma, pgd, addr, next, details); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); + vm_write_end(vma); } diff --git a/mm/mmap.c b/mm/mmap.c index f4ee016a017f..4bd0f13255d4 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -728,6 +728,30 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, long adjust_next = 0; int remove_next = 0; + /* + * Why using vm_raw_write*() functions here to avoid lockdep's warning ? + * + * Locked is complaining about a theoretical lock dependency, involving + * 3 locks: + * mapping->i_mmap_rwsem --> vma->vm_sequence --> fs_reclaim + * + * Here are the major path leading to this dependency : + * 1. __vma_adjust() mmap_sem -> vm_sequence -> i_mmap_rwsem + * 2. move_vmap() mmap_sem -> vm_sequence -> fs_reclaim + * 3. __alloc_pages_nodemask() fs_reclaim -> i_mmap_rwsem + * 4. unmap_mapping_range() i_mmap_rwsem -> vm_sequence + * + * So there is no way to solve this easily, especially because in + * unmap_mapping_range() the i_mmap_rwsem is grab while the impacted + * VMAs are not yet known. + * However, the way the vm_seq is used is guarantying that we will + * never block on it since we just check for its value and never wait + * for it to move, see vma_has_changed() and handle_speculative_fault(). + */ + vm_raw_write_begin(vma); + if (next) + vm_raw_write_begin(next); + if (next && !insert) { struct vm_area_struct *exporter = NULL, *importer = NULL; @@ -938,6 +962,7 @@ again: anon_vma_merge(vma, next); mm->map_count--; mpol_put(vma_policy(next)); + vm_raw_write_end(next); vm_area_free(next); /* * In mprotect's case 6 (see comments on vma_merge), @@ -952,6 +977,8 @@ again: * "vma->vm_next" gap must be updated. */ next = vma->vm_next; + if (next) + vm_raw_write_begin(next); } else { /* * For the scope of the comment "next" and @@ -998,6 +1025,10 @@ again: if (insert && file) uprobe_mmap(insert); + if (next && next != vma) + vm_raw_write_end(next); + vm_raw_write_end(vma); + validate_mm(mm); return 0; From 06219e6c87ad8b309d0d8e010dbd06e0e74e889b Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:15 +0200 Subject: [PATCH 06/23] mm: protect VMA modifications using VMA sequence count The VMA sequence count has been introduced to allow fast detection of VMA modification when running a page fault handler without holding the mmap_sem. This patch provides protection against the VMA modification done in : - madvise() - mpol_rebind_policy() - vma_replace_policy() - change_prot_numa() - mlock(), munlock() - mprotect() - mmap_region() - collapse_huge_page() - userfaultd registering services In addition, VMA fields which will be read during the speculative fault path needs to be written using WRITE_ONCE to prevent write to be split and intermediate values to be pushed to other CPUs. 
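Every hunk below follows the same two-part pattern; the sketch here only spells it out in one place (the function name is illustrative, not part of the patch). Writers bracket the update with the VMA sequence count and publish each field with WRITE_ONCE(), so a lockless reader can pair READ_ONCE() with a re-check of vma->vm_sequence:

static void vma_set_flags_example(struct vm_area_struct *vma,
				  unsigned long new_flags)
{
	vm_write_begin(vma);			/* seqcount goes odd: readers back off */
	WRITE_ONCE(vma->vm_flags, new_flags);	/* single store, cannot be torn */
	vm_write_end(vma);			/* seqcount goes even: update visible */
}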
Change-Id: Ic36046b7254e538b6baf7144c50ae577ee7f2074 Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:15 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- fs/proc/task_mmu.c | 5 ++++- fs/userfaultfd.c | 17 ++++++++++++---- mm/khugepaged.c | 3 +++ mm/madvise.c | 10 ++++++++- mm/mempolicy.c | 51 ++++++++++++++++++++++++++++++---------------- mm/mlock.c | 13 +++++++----- mm/mmap.c | 22 ++++++++++++-------- mm/mprotect.c | 4 +++- mm/swap_state.c | 10 ++++++--- 9 files changed, 94 insertions(+), 41 deletions(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 7a4397ce4559..d443986dfc33 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1277,8 +1277,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, goto out_mm; } for (vma = mm->mmap; vma; vma = vma->vm_next) { - vma->vm_flags &= ~VM_SOFTDIRTY; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & ~VM_SOFTDIRTY); vma_set_page_prot(vma); + vm_write_end(vma); } downgrade_write(&mm->mmap_sem); break; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ebf17d7e1093..ab846136c05c 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -675,8 +675,11 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { + vm_write_begin(vma); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; - vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING); + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & ~(VM_UFFD_WP | VM_UFFD_MISSING)); + vm_write_end(vma); return 0; } @@ -919,8 +922,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file) else prev = vma; } - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vm_write_end(vma); } up_write(&mm->mmap_sem); mmput(mm); @@ -1487,8 +1492,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. */ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx.ctx = ctx; + vm_write_end(vma); skip: prev = vma; @@ -1650,8 +1657,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. 
*/ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + vm_write_end(vma); skip: prev = vma; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a8a57bebb5fa..3505f09eaf1b 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1026,6 +1026,7 @@ static void collapse_huge_page(struct mm_struct *mm, if (mm_find_pmd(mm, address) != pmd) goto out; + vm_write_begin(vma); anon_vma_lock_write(vma->anon_vma); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, @@ -1062,6 +1063,7 @@ static void collapse_huge_page(struct mm_struct *mm, pmd_populate(mm, pmd, pmd_pgtable(_pmd)); spin_unlock(pmd_ptl); anon_vma_unlock_write(vma->anon_vma); + vm_write_end(vma); result = SCAN_FAIL; goto out; } @@ -1097,6 +1099,7 @@ static void collapse_huge_page(struct mm_struct *mm, set_pmd_at(mm, address, pmd, _pmd); update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); + vm_write_end(vma); *hpage = NULL; diff --git a/mm/madvise.c b/mm/madvise.c index 360a2d46499e..75eeb9a4ce1d 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -166,7 +166,9 @@ success: /* * vm_flags is protected by the mmap_sem held in write mode. */ - vma->vm_flags = new_flags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, new_flags); + vm_write_end(vma); out_convert_errno: /* @@ -483,9 +485,11 @@ static void madvise_cold_page_range(struct mmu_gather *tlb, .tlb = tlb, }; + vm_write_begin(vma); tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); + vm_write_end(vma); } static long madvise_cold(struct vm_area_struct *vma, @@ -516,9 +520,11 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb, .tlb = tlb, }; + vm_write_begin(vma); tlb_start_vma(tlb, vma); walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); tlb_end_vma(tlb, vma); + vm_write_end(vma); } static inline bool can_do_pageout(struct vm_area_struct *vma) @@ -721,10 +727,12 @@ static int madvise_free_single_vma(struct vm_area_struct *vma, update_hiwater_rss(mm); mmu_notifier_invalidate_range_start(&range); + vm_write_begin(vma); tlb_start_vma(&tlb, vma); walk_page_range(vma->vm_mm, range.start, range.end, &madvise_free_walk_ops, &tlb); tlb_end_vma(&tlb, vma); + vm_write_end(vma); mmu_notifier_invalidate_range_end(&range); tlb_finish_mmu(&tlb, range.start, range.end); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 024a2465cd3f..afd307262ecb 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -380,8 +380,11 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) struct vm_area_struct *vma; down_write(&mm->mmap_sem); - for (vma = mm->mmap; vma; vma = vma->vm_next) + for (vma = mm->mmap; vma; vma = vma->vm_next) { + vm_write_begin(vma); mpol_rebind_policy(vma->vm_policy, new); + vm_write_end(vma); + } up_write(&mm->mmap_sem); } @@ -596,9 +599,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, { int nr_updated; + vm_write_begin(vma); nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); if (nr_updated) count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated); + vm_write_end(vma); return nr_updated; } @@ -711,6 +716,7 @@ static int vma_replace_policy(struct vm_area_struct *vma, if (IS_ERR(new)) return PTR_ERR(new); + vm_write_begin(vma); if (vma->vm_ops && vma->vm_ops->set_policy) { err = vma->vm_ops->set_policy(vma, new); if (err) @@ -718,11 +724,17 @@ static int vma_replace_policy(struct vm_area_struct *vma, } old = vma->vm_policy; - vma->vm_policy = new; /* protected by 
mmap_sem */ + /* + * The speculative page fault handler accesses this field without + * hodling the mmap_sem. + */ + WRITE_ONCE(vma->vm_policy, new); + vm_write_end(vma); mpol_put(old); return 0; err_out: + vm_write_end(vma); mpol_put(new); return err; } @@ -1703,23 +1715,28 @@ COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid, struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = NULL; + struct mempolicy *pol; - if (vma) { - if (vma->vm_ops && vma->vm_ops->get_policy) { - pol = vma->vm_ops->get_policy(vma, addr); - } else if (vma->vm_policy) { - pol = vma->vm_policy; + if (!vma) + return NULL; - /* - * shmem_alloc_page() passes MPOL_F_SHARED policy with - * a pseudo vma whose vma->vm_ops=NULL. Take a reference - * count on these policies which will be dropped by - * mpol_cond_put() later - */ - if (mpol_needs_cond_ref(pol)) - mpol_get(pol); - } + if (vma->vm_ops && vma->vm_ops->get_policy) + return vma->vm_ops->get_policy(vma, addr); + + /* + * This could be called without holding the mmap_sem in the + * speculative page fault handler's path. + */ + pol = READ_ONCE(vma->vm_policy); + if (pol) { + /* + * shmem_alloc_page() passes MPOL_F_SHARED policy with + * a pseudo vma whose vma->vm_ops=NULL. Take a reference + * count on these policies which will be dropped by + * mpol_cond_put() later + */ + if (mpol_needs_cond_ref(pol)) + mpol_get(pol); } return pol; diff --git a/mm/mlock.c b/mm/mlock.c index 646acba3045b..395bf08f86d8 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -445,7 +445,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec, void munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, vma->vm_flags & VM_LOCKED_CLEAR_MASK); + vm_write_end(vma); while (start < end) { struct page *page; @@ -569,10 +571,11 @@ success: * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ - - if (lock) - vma->vm_flags = newflags; - else + if (lock) { + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, newflags); + vm_write_end(vma); + } else munlock_vma_pages_range(vma, start, end); out: diff --git a/mm/mmap.c b/mm/mmap.c index 4bd0f13255d4..6847cadf5715 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -879,17 +879,18 @@ again: } if (start != vma->vm_start) { - vma->vm_start = start; + WRITE_ONCE(vma->vm_start, start); start_changed = true; } if (end != vma->vm_end) { - vma->vm_end = end; + WRITE_ONCE(vma->vm_end, end); end_changed = true; } - vma->vm_pgoff = pgoff; + WRITE_ONCE(vma->vm_pgoff, pgoff); if (adjust_next) { - next->vm_start += adjust_next << PAGE_SHIFT; - next->vm_pgoff += adjust_next; + WRITE_ONCE(next->vm_start, + next->vm_start + (adjust_next << PAGE_SHIFT)); + WRITE_ONCE(next->vm_pgoff, next->vm_pgoff + adjust_next); } if (root) { @@ -1868,12 +1869,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr, out: perf_event_mmap(vma); + vm_write_begin(vma); vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) - vma->vm_flags &= VM_LOCKED_CLEAR_MASK; + WRITE_ONCE(vma->vm_flags, + vma->vm_flags & VM_LOCKED_CLEAR_MASK); else mm->locked_vm += (len >> PAGE_SHIFT); } @@ -1888,9 +1891,10 @@ out: * then new mapped in-place (which must be aimed as * a completely new data area). 
*/ - vma->vm_flags |= VM_SOFTDIRTY; + WRITE_ONCE(vma->vm_flags, vma->vm_flags | VM_SOFTDIRTY); vma_set_page_prot(vma); + vm_write_end(vma); return addr; @@ -2525,8 +2529,8 @@ int expand_downwards(struct vm_area_struct *vma, mm->locked_vm += grow; vm_stat_account(mm, vma->vm_flags, grow); anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_start = address; - vma->vm_pgoff -= grow; + WRITE_ONCE(vma->vm_start, address); + WRITE_ONCE(vma->vm_pgoff, vma->vm_pgoff - grow); anon_vma_interval_tree_post_update_vma(vma); vma_gap_update(vma); spin_unlock(&mm->page_table_lock); diff --git a/mm/mprotect.c b/mm/mprotect.c index 65d317058e9e..5c1a70a80f8f 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -454,12 +454,14 @@ success: * vm_flags and vm_page_prot are protected by the mmap_sem * held in write mode. */ - vma->vm_flags = newflags; + vm_write_begin(vma); + WRITE_ONCE(vma->vm_flags, newflags); dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot); vma_set_page_prot(vma); change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0); + vm_write_end(vma); /* * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major diff --git a/mm/swap_state.c b/mm/swap_state.c index 8e7ce9a9bc5e..13b751a9d6a2 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -534,7 +534,11 @@ static unsigned long swapin_nr_pages(unsigned long offset) * This has been extended to use the NUMA policies from the mm triggering * the readahead. * - * Caller must hold read mmap_sem if vmf->vma is not NULL. + * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL. + * This is needed to ensure the VMA will not be freed in our back. In the case + * of the speculative page fault handler, this cannot happen, even if we don't + * hold the mmap_sem. Callees are assumed to take care of reading VMA's fields + * using READ_ONCE() to read consistent values. */ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) @@ -631,9 +635,9 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma, unsigned long *start, unsigned long *end) { - *start = max3(lpfn, PFN_DOWN(vma->vm_start), + *start = max3(lpfn, PFN_DOWN(READ_ONCE(vma->vm_start)), PFN_DOWN(faddr & PMD_MASK)); - *end = min3(rpfn, PFN_DOWN(vma->vm_end), + *end = min3(rpfn, PFN_DOWN(READ_ONCE(vma->vm_end)), PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE)); } From 728a43dfff0cda8e95447709484b7d8f54a9a3ee Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:16 +0200 Subject: [PATCH 07/23] mm: protect mremap() against SPF hanlder If a thread is remapping an area while another one is faulting on the destination area, the SPF handler may fetch the vma from the RB tree before the pte has been moved by the other thread. This means that the moved ptes will overwrite those create by the page fault handler leading to page leaked. CPU 1 CPU2 enter mremap() unmap the dest area copy_vma() Enter speculative page fault handler >> at this time the dest area is present in the RB tree fetch the vma matching dest area create a pte as the VMA matched Exit the SPF handler move_ptes() > it is assumed that the dest area is empty, > the move ptes overwrite the page mapped by the CPU2. To prevent that, when the VMA matching the dest area is extended or created by copy_vma(), it should be marked as non available to the SPF handler. The usual way to so is to rely on vm_write_begin()/end(). This is already in __vma_adjust() called by copy_vma() (through vma_merge()). 
But __vma_adjust() is calling vm_write_end() before returning which create a window for another thread. This patch adds a new parameter to vma_merge() which is passed down to vma_adjust(). The assumption is that copy_vma() is returning a vma which should be released by calling vm_raw_write_end() by the callee once the ptes have been moved. Change-Id: Icd338ad6e9b3c97b7334d3b8d30a8badfa2a4efa Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:16 [vinmenon@codeaurora.org: changes in vma_merge arguments related to the anon vma user name which is not suppported upstream.] Signed-off-by: Vinayak Menon --- include/linux/mm.h | 23 +++++++++++++++----- mm/mmap.c | 53 +++++++++++++++++++++++++++++++++++----------- mm/mremap.c | 13 ++++++++++++ 3 files changed, 72 insertions(+), 17 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index d3b9371cae11..fe33752e2890 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2325,16 +2325,29 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node); extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, - struct vm_area_struct *expand); + struct vm_area_struct *expand, bool keep_locked); static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) { - return __vma_adjust(vma, start, end, pgoff, insert, NULL); + return __vma_adjust(vma, start, end, pgoff, insert, NULL, false); } -extern struct vm_area_struct *vma_merge(struct mm_struct *, + +extern struct vm_area_struct *__vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, - unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t, - struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *); + unsigned long vm_flags, struct anon_vma *anon, struct file *file, + pgoff_t pgoff, struct mempolicy *mpol, struct vm_userfaultfd_ctx uff, + const char __user *user, bool keep_locked); + +static inline struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, unsigned long end, + unsigned long vm_flags, struct anon_vma *anon, struct file *file, + pgoff_t off, struct mempolicy *pol, struct vm_userfaultfd_ctx uff, + const char __user *user) +{ + return __vma_merge(mm, prev, addr, end, vm_flags, anon, file, off, + pol, uff, user, false); +} + extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int __split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); diff --git a/mm/mmap.c b/mm/mmap.c index 6847cadf5715..bdb0043fd935 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -716,7 +716,7 @@ static inline void __vma_unlink_prev(struct mm_struct *mm, */ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert, - struct vm_area_struct *expand) + struct vm_area_struct *expand, bool keep_locked) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; @@ -832,8 +832,12 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) + if (error) { + if (next && next != vma) + vm_raw_write_end(next); + vm_raw_write_end(vma); return error; + } } } 
again: @@ -1028,7 +1032,8 @@ again: if (next && next != vma) vm_raw_write_end(next); - vm_raw_write_end(vma); + if (!keep_locked) + vm_raw_write_end(vma); validate_mm(mm); @@ -1169,13 +1174,13 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, * parameter) may establish ptes with the wrong permissions of NNNN * instead of the right permissions of XXXX. */ -struct vm_area_struct *vma_merge(struct mm_struct *mm, +struct vm_area_struct *__vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, struct vm_userfaultfd_ctx vm_userfaultfd_ctx, - const char __user *anon_name) + const char __user *anon_name, bool keep_locked) { pgoff_t pglen = (end - addr) >> PAGE_SHIFT; struct vm_area_struct *area, *next; @@ -1225,10 +1230,11 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, /* cases 1, 6 */ err = __vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL, - prev); + prev, keep_locked); } else /* cases 2, 5, 7 */ err = __vma_adjust(prev, prev->vm_start, - end, prev->vm_pgoff, NULL, prev); + end, prev->vm_pgoff, NULL, prev, + keep_locked); if (err) return NULL; khugepaged_enter_vma_merge(prev, vm_flags); @@ -1246,10 +1252,12 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm, anon_name)) { if (prev && addr < prev->vm_end) /* case 4 */ err = __vma_adjust(prev, prev->vm_start, - addr, prev->vm_pgoff, NULL, next); + addr, prev->vm_pgoff, NULL, next, + keep_locked); else { /* cases 3, 8 */ err = __vma_adjust(area, addr, next->vm_end, - next->vm_pgoff - pglen, NULL, next); + next->vm_pgoff - pglen, NULL, next, + keep_locked); /* * In case 3 area is already equal to next and * this is a noop, but in case 8 "area" has @@ -3271,9 +3279,21 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) return NULL; /* should never get here */ - new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, - vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), - vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); + + /* There is 3 cases to manage here in + * AAAA AAAA AAAA AAAA + * PPPP.... PPPP......NNNN PPPP....NNNN PP........NN + * PPPPPPPP(A) PPPP..NNNNNNNN(B) PPPPPPPPPPPP(1) NULL + * PPPPPPPPNNNN(2) + * PPPPNNNNNNNN(3) + * + * new_vma == prev in case A,1,2 + * new_vma == next in case B,3 + */ + new_vma = __vma_merge(mm, prev, addr, addr + len, vma->vm_flags, + vma->anon_vma, vma->vm_file, pgoff, + vma_policy(vma), vma->vm_userfaultfd_ctx, + vma_get_anon_name(vma), true); if (new_vma) { /* * Source vma may have been merged into new_vma @@ -3311,6 +3331,15 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); + /* + * As the VMA is linked right now, it may be hit by the + * speculative page fault handler. But we don't want it to + * to start mapping page in this area until the caller has + * potentially move the pte from the moved VMA. To prevent + * that we protect it right now, and let the caller unprotect + * it once the move is done. 
+ */ + vm_raw_write_begin(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; } diff --git a/mm/mremap.c b/mm/mremap.c index 1d98281f7204..7b841ad88982 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -357,6 +357,14 @@ static unsigned long move_vma(struct vm_area_struct *vma, if (!new_vma) return -ENOMEM; + /* new_vma is returned protected by copy_vma, to prevent speculative + * page fault to be done in the destination area before we move the pte. + * Now, we must also protect the source VMA since we don't want pages + * to be mapped in our back while we are copying the PTEs. + */ + if (vma != new_vma) + vm_raw_write_begin(vma); + moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, need_rmap_locks); if (moved_len < old_len) { @@ -373,6 +381,8 @@ static unsigned long move_vma(struct vm_area_struct *vma, */ move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, true); + if (vma != new_vma) + vm_raw_write_end(vma); vma = new_vma; old_len = new_len; old_addr = new_addr; @@ -381,7 +391,10 @@ static unsigned long move_vma(struct vm_area_struct *vma, mremap_userfaultfd_prep(new_vma, uf); arch_remap(mm, old_addr, old_addr + old_len, new_addr, new_addr + new_len); + if (vma != new_vma) + vm_raw_write_end(vma); } + vm_raw_write_end(new_vma); /* Conceal VM_ACCOUNT so old reservation is not undone */ if (vm_flags & VM_ACCOUNT) { From ff6cfddf7a20ecb94397f8ebd925e06933945c10 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:17 +0200 Subject: [PATCH 08/23] mm: protect SPF handler against anon_vma changes The speculative page fault handler must be protected against anon_vma changes. This is because page_add_new_anon_rmap() is called during the speculative path. In addition, don't try speculative page fault if the VMA don't have an anon_vma structure allocated because its allocation should be protected by the mmap_sem. In __vma_adjust() when importer->anon_vma is set, there is no need to protect against speculative page faults since speculative page fault is aborted if the vma->anon_vma is not set. When calling page_add_new_anon_rmap() vma->anon_vma is necessarily valid since we checked for it when locking the pte and the anon_vma is removed once the pte is unlocked. So even if the speculative page fault handler is running concurrently with do_unmap(), as the pte is locked in unmap_region() - through unmap_vmas() - and the anon_vma unlinked later, because we check for the vma sequence counter which is updated in unmap_page_range() before locking the pte, and then in free_pgtables() so when locking the pte the change will be detected. 
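The reasoning above boils down to a simple gate on the speculative path, sketched below. The check itself only lands later in the series and spf_vma_usable_example() is an invented name; the point is that a NULL anon_vma aborts the speculative attempt rather than allocating one without the mmap_sem, while a concurrent unlink_anon_vmas() is caught by the vm_sequence re-check performed when the pte is locked.

static bool spf_vma_usable_example(struct vm_area_struct *vma)
{
	/* anon_vma allocation needs the mmap_sem: never do it here */
	if (!READ_ONCE(vma->anon_vma))
		return false;

	/*
	 * unlink_anon_vmas() now runs under vm_write_begin()/vm_write_end()
	 * (see the free_pgtables() hunks below), so a teardown racing with
	 * us bumps vma->vm_sequence and is detected at pte lock time.
	 */
	return true;
}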
Change-Id: I6c1f3b5c811d1ddd7b3f769082e8bbd40f5b52a0 Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:17 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon --- mm/memory.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index eaf7f23b05f6..0b0c90b5e9d5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -397,7 +397,9 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, * Hide vma from rmap and truncate_pagecache before freeing * pgtables */ + vm_write_begin(vma); unlink_anon_vmas(vma); + vm_write_end(vma); unlink_file_vma(vma); if (is_vm_hugetlb_page(vma)) { @@ -411,7 +413,9 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, && !is_vm_hugetlb_page(next)) { vma = next; next = vma->vm_next; + vm_write_begin(vma); unlink_anon_vmas(vma); + vm_write_end(vma); unlink_file_vma(vma); } free_pgd_range(tlb, addr, vma->vm_end, From 5d1dccddd720cc20ebc8cb9c68fa4118181500f7 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:18 +0200 Subject: [PATCH 09/23] mm: cache some VMA fields in the vm_fault structure When handling speculative page fault, the vma->vm_flags and vma->vm_page_prot fields are read once the page table lock is released. So there is no more guarantee that these fields would not change in our back. They will be saved in the vm_fault structure before the VMA is checked for changes. This patch also set the fields in hugetlb_no_page() and __collapse_huge_page_swapin even if it is not need for the callee. Signed-off-by: Laurent Dufour Change-Id: I9821f02ea32ef220b57b8bfd817992bbf71bbb1d Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:18 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- include/linux/mm.h | 4 ++-- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 2 ++ mm/khugepaged.c | 2 ++ mm/memory.c | 53 ++++++++++++++++++++++++---------------------- mm/migrate.c | 2 +- 6 files changed, 38 insertions(+), 31 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index fe33752e2890..e346d714b454 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -838,9 +838,9 @@ void free_compound_page(struct page *page); * pte_mkwrite. But get_user_pages can cause write faults for mappings * that do not have writing enabled, when used by access_process_vm. 
*/ -static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) +static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags) { - if (likely(vma->vm_flags & VM_WRITE)) + if (likely(vma_flags & VM_WRITE)) pte = pte_mkwrite(pte); return pte; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c4ebfcd93721..4683d83123ec 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1268,8 +1268,8 @@ static vm_fault_t do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { pte_t entry; - entry = mk_pte(pages[i], vma->vm_page_prot); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = mk_pte(pages[i], vmf->vma_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); memcg = (void *)page_private(pages[i]); set_page_private(pages[i], 0); page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); @@ -2252,7 +2252,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_swp_mksoft_dirty(entry); } else { entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); - entry = maybe_mkwrite(entry, vma); + entry = maybe_mkwrite(entry, vma->vm_flags); if (!write) entry = pte_wrprotect(entry); if (!young) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 66ee1a894546..1a7f0890cf58 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3950,6 +3950,8 @@ retry: .vma = vma, .address = haddr, .flags = flags, + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, /* * Hard to debug if it ends up being * used by a callee that assumes diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3505f09eaf1b..8e5007f724ec 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -900,6 +900,8 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, .pgoff = linear_page_index(vma, address), + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, }; /* we only decide to swapin, if there is enough young ptes */ diff --git a/mm/memory.c b/mm/memory.c index 0b0c90b5e9d5..5d7616f79aa0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1630,7 +1630,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, goto out_unlock; } entry = pte_mkyoung(*pte); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), + vma->vm_flags); if (ptep_set_access_flags(vma, addr, pte, entry, 1)) update_mmu_cache(vma, addr, pte); } @@ -1645,7 +1646,7 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, if (mkwrite) { entry = pte_mkyoung(entry); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags); } set_pte_at(mm, addr, pte, entry); @@ -2343,7 +2344,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) update_mmu_cache(vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -2421,8 +2422,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) inc_mm_counter_fast(mm, MM_ANONPAGES); } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); - entry = mk_pte(new_page, vma->vm_page_prot); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = mk_pte(new_page, vmf->vma_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), 
vmf->vma_flags); /* * Clear the pte entry and flush it first, before updating the * pte with the new entry. This will avoid a race condition @@ -2487,7 +2488,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Don't let another task, with possibly unlocked vma, * keep the mlocked page. */ - if (page_copied && (vma->vm_flags & VM_LOCKED)) { + if (page_copied && (vmf->vma_flags & VM_LOCKED)) { lock_page(old_page); /* LRU manipulation */ if (PageMlocked(old_page)) munlock_vma_page(old_page); @@ -2524,7 +2525,7 @@ out: */ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) { - WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); + WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED)); if (!pte_map_lock(vmf)) return VM_FAULT_RETRY; /* @@ -2627,7 +2628,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * We should not cow pages in a shared writeable mapping. * Just mark the pages writable and/or call ops->pfn_mkwrite. */ - if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) return wp_pfn_shared(vmf); @@ -2686,7 +2687,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) return VM_FAULT_WRITE; } unlock_page(vmf->page); - } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == + } else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { return wp_page_shared(vmf); } @@ -2961,9 +2962,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); - pte = mk_pte(page, vma->vm_page_prot); + pte = mk_pte(page, vmf->vma_page_prot); if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { - pte = maybe_mkwrite(pte_mkdirty(pte), vma); + pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags); vmf->flags &= ~FAULT_FLAG_WRITE; ret |= VM_FAULT_WRITE; exclusive = RMAP_EXCLUSIVE; @@ -2988,7 +2989,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) swap_free(entry); if (mem_cgroup_swap_full(page) || - (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + (vmf->vma_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); if (page != swapcache && swapcache) { @@ -3046,7 +3047,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) pte_t entry; /* File mapping without ->vm_ops ? 
*/ - if (vma->vm_flags & VM_SHARED) + if (vmf->vma_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* @@ -3070,7 +3071,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), - vma->vm_page_prot)); + vmf->vma_page_prot)); if (!pte_map_lock(vmf)) return VM_FAULT_RETRY; if (!pte_none(*vmf->pte)) @@ -3104,8 +3105,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) */ __SetPageUptodate(page); - entry = mk_pte(page, vma->vm_page_prot); - if (vma->vm_flags & VM_WRITE) + entry = mk_pte(page, vmf->vma_page_prot); + if (vmf->vma_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); if (!pte_map_lock(vmf)) { @@ -3312,7 +3313,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) for (i = 0; i < HPAGE_PMD_NR; i++) flush_icache_page(vma, page + i); - entry = mk_huge_pmd(page, vma->vm_page_prot); + entry = mk_huge_pmd(page, vmf->vma_page_prot); if (write) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); @@ -3388,15 +3389,15 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, return VM_FAULT_NOPAGE; flush_icache_page(vma, page); - entry = mk_pte(page, vma->vm_page_prot); + entry = mk_pte(page, vmf->vma_page_prot); if (write) - entry = maybe_mkwrite(pte_mkdirty(entry), vma); + entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags); if (vmf->flags & FAULT_FLAG_PREFAULT_OLD) entry = pte_mkold(entry); /* copy-on-write page */ - if (write && !(vma->vm_flags & VM_SHARED)) { + if (write && !(vmf->vma_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); @@ -3436,7 +3437,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf) /* Did we COW the page? */ if ((vmf->flags & FAULT_FLAG_WRITE) && - !(vmf->vma->vm_flags & VM_SHARED)) + !(vmf->vma_flags & VM_SHARED)) page = vmf->cow_page; else page = vmf->page; @@ -3715,7 +3716,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf) } } else if (!(vmf->flags & FAULT_FLAG_WRITE)) ret = do_read_fault(vmf); - else if (!(vma->vm_flags & VM_SHARED)) + else if (!(vmf->vma_flags & VM_SHARED)) ret = do_cow_fault(vmf); else ret = do_shared_fault(vmf); @@ -3772,7 +3773,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * accessible ptes, some can allow access by kernel mode. */ old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); - pte = pte_modify(old_pte, vma->vm_page_prot); + pte = pte_modify(old_pte, vmf->vma_page_prot); pte = pte_mkyoung(pte); if (was_writable) pte = pte_mkwrite(pte); @@ -3806,7 +3807,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * Flag if the page is shared between multiple address spaces. 
This * is later used when determining whether to group tasks together */ - if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) + if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED)) flags |= TNF_SHARED; last_cpupid = page_cpupid_last(page); @@ -3851,7 +3852,7 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); /* COW handled on pte level: split pmd */ - VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma); + VM_BUG_ON_VMA(vmf->vma_flags & VM_SHARED, vmf->vma); __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; @@ -3998,6 +3999,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, .flags = flags, .pgoff = linear_page_index(vma, address), .gfp_mask = __get_fault_gfp_mask(vma), + .vma_flags = vma->vm_flags, + .vma_page_prot = vma->vm_page_prot, }; unsigned int dirty = flags & FAULT_FLAG_WRITE; struct mm_struct *mm = vma->vm_mm; diff --git a/mm/migrate.c b/mm/migrate.c index 6bb39c65c9cc..5909ffcc76f6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -241,7 +241,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, */ entry = pte_to_swp_entry(*pvmw.pte); if (is_write_migration_entry(entry)) - pte = maybe_mkwrite(pte, vma); + pte = maybe_mkwrite(pte, vma->vm_flags); if (unlikely(is_zone_device_page(new))) { if (is_device_private_page(new)) { From b7efa36bf548cec5875e9caf56a24f59685deff4 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:19 +0200 Subject: [PATCH 10/23] mm/migrate: Pass vm_fault pointer to migrate_misplaced_page() migrate_misplaced_page() is only called during the page fault handling so it's better to pass the pointer to the struct vm_fault instead of the vma. This way during the speculative page fault path the saved vma->vm_flags could be used. Change-Id: I254a7c9d91dca9ee8a9afd5eccd6de9af5dc8bc0 Acked-by: David Rientjes Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:19 Signed-off-by: Vinayak Menon --- include/linux/migrate.h | 4 ++-- mm/memory.c | 2 +- mm/migrate.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 72120061b7d4..8a7b150c2270 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -124,14 +124,14 @@ static inline void __ClearPageMovable(struct page *page) #ifdef CONFIG_NUMA_BALANCING extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, - struct vm_area_struct *vma, int node); + struct vm_fault *vmf, int node); #else static inline bool pmd_trans_migrating(pmd_t pmd) { return false; } static inline int migrate_misplaced_page(struct page *page, - struct vm_area_struct *vma, int node) + struct vm_fault *vmf, int node) { return -EAGAIN; /* can't migrate now */ } diff --git a/mm/memory.c b/mm/memory.c index 5d7616f79aa0..7d04fae7a792 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3821,7 +3821,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) } /* Migrate to the requested node */ - migrated = migrate_misplaced_page(page, vma, target_nid); + migrated = migrate_misplaced_page(page, vmf, target_nid); if (migrated) { page_nid = target_nid; flags |= TNF_MIGRATED; diff --git a/mm/migrate.c b/mm/migrate.c index 5909ffcc76f6..f5b5376b6624 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1968,7 +1968,7 @@ bool pmd_trans_migrating(pmd_t pmd) * node. 
Caller is expected to have an elevated reference count on * the page that will be dropped by this function before returning. */ -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_page(struct page *page, struct vm_fault *vmf, int node) { pg_data_t *pgdat = NODE_DATA(node); @@ -1981,7 +1981,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, * with execute permissions as they are probably shared libraries. */ if (page_mapcount(page) != 1 && page_is_file_cache(page) && - (vma->vm_flags & VM_EXEC)) + (vmf->vma_flags & VM_EXEC)) goto out; /* From 2536f11b7d9bb7a6974ab163b0c482bb689748e5 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:20 +0200 Subject: [PATCH 11/23] mm: introduce __lru_cache_add_active_or_unevictable The speculative page fault handler which is run without holding the mmap_sem is calling lru_cache_add_active_or_unevictable() but the vm_flags is not guaranteed to remain constant. Introducing __lru_cache_add_active_or_unevictable() which has the vma flags value parameter instead of the vma pointer. Change-Id: I68decbe0f80847403127c45c97565e47512532e9 Acked-by: David Rientjes Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:20 [vinmenon@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- include/linux/swap.h | 10 ++++++++-- mm/memory.c | 8 ++++---- mm/swap.c | 6 +++--- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 063c0c1e112b..e5288634c38b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -344,8 +344,14 @@ extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_active_or_unevictable(struct page *page, - struct vm_area_struct *vma); +extern void __lru_cache_add_active_or_unevictable(struct page *page, + unsigned long vma_flags); + +static inline void lru_cache_add_active_or_unevictable(struct page *page, + struct vm_area_struct *vma) +{ + return __lru_cache_add_active_or_unevictable(page, vma->vm_flags); +} /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); diff --git a/mm/memory.c b/mm/memory.c index 7d04fae7a792..ed99efeefa11 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2433,7 +2433,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) ptep_clear_flush_notify(vma, vmf->address, vmf->pte); page_add_new_anon_rmap(new_page, vma, vmf->address, false); mem_cgroup_commit_charge(new_page, memcg, false, false); - lru_cache_add_active_or_unevictable(new_page, vma); + __lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags); /* * We call the notify macro here because, when using secondary * mmu page tables (such as kvm shadow page tables), we want the @@ -2980,7 +2980,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (unlikely(page != swapcache && swapcache)) { page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { do_page_add_anon_rmap(page, vma, vmf->address, exclusive); mem_cgroup_commit_charge(page, memcg, true, false); @@ -3131,7 +3131,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) inc_mm_counter_fast(vma->vm_mm, 
MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); setpte: set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); @@ -3401,7 +3401,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); - lru_cache_add_active_or_unevictable(page, vma); + __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page, false); diff --git a/mm/swap.c b/mm/swap.c index 38c3fa4308e2..7da393f516e7 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -452,12 +452,12 @@ void lru_cache_add(struct page *page) * directly back onto it's zone's unevictable list, it does NOT use a * per cpu pagevec. */ -void lru_cache_add_active_or_unevictable(struct page *page, - struct vm_area_struct *vma) +void __lru_cache_add_active_or_unevictable(struct page *page, + unsigned long vma_flags) { VM_BUG_ON_PAGE(PageLRU(page), page); - if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) + if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) SetPageActive(page); else if (!TestSetPageMlocked(page)) { /* From ec5147dea1b8f02e576a06531d52098045513eda Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:21 +0200 Subject: [PATCH 12/23] mm: introduce __vm_normal_page() When dealing with the speculative fault path we should use the VMA's field cached value stored in the vm_fault structure. Currently vm_normal_page() is using the pointer to the VMA to fetch the vm_flags value. This patch provides a new __vm_normal_page() which is receiving the vm_flags flags value as parameter. Note: The speculative path is turned on for architecture providing support for special PTE flag. So only the first block of vm_normal_page is used during the speculative path. Change-Id: I0f2c4ab1212fbca449bdf6e7993dafa0d41044bc Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:21 Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- include/linux/mm.h | 10 ++++++++-- mm/memory.c | 24 +++++++++++++++--------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index e346d714b454..d05ef12f6f6e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1447,8 +1447,14 @@ struct zap_details { pgoff_t last_index; /* Highest page->index to unmap */ }; -struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte); +struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, unsigned long vma_flags); +static inline struct page *vm_normal_page(struct vm_area_struct *vma, + unsigned long addr, pte_t pte) +{ + return _vm_normal_page(vma, addr, pte, vma->vm_flags); +} + struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); diff --git a/mm/memory.c b/mm/memory.c index ed99efeefa11..8136d0f55871 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -553,7 +553,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, } /* - * vm_normal_page -- This function gets the "struct page" associated with a pte. 
+ * __vm_normal_page -- This function gets the "struct page" associated with + * a pte. * * "Special" mappings do not wish to be associated with a "struct page" (either * it doesn't exist, or it exists but they don't want to touch it). In this @@ -594,8 +595,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, * PFNMAP mappings in order to support COWable mappings. * */ -struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, - pte_t pte) +struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte, unsigned long vma_flags) { unsigned long pfn = pte_pfn(pte); @@ -604,7 +605,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, goto check_pfn; if (vma->vm_ops && vma->vm_ops->find_special_page) return vma->vm_ops->find_special_page(vma, addr); - if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) + if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP)) return NULL; if (is_zero_pfn(pfn)) return NULL; @@ -616,9 +617,13 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, } /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */ + /* + * This part should never get called when CONFIG_SPECULATIVE_PAGE_FAULT + * is set. This is mainly because we can't rely on vm_start. + */ - if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { - if (vma->vm_flags & VM_MIXEDMAP) { + if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) { + if (vma_flags & VM_MIXEDMAP) { if (!pfn_valid(pfn)) return NULL; goto out; @@ -627,7 +632,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, off = (addr - vma->vm_start) >> PAGE_SHIFT; if (pfn == vma->vm_pgoff + off) return NULL; - if (!is_cow_mapping(vma->vm_flags)) + if (!is_cow_mapping(vma_flags)) return NULL; } } @@ -2619,7 +2624,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); + vmf->page = _vm_normal_page(vma, vmf->address, vmf->orig_pte, + vmf->vma_flags); if (!vmf->page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a @@ -3780,7 +3786,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); update_mmu_cache(vma, vmf->address, vmf->pte); - page = vm_normal_page(vma, vmf->address, pte); + page = _vm_normal_page(vma, vmf->address, pte, vmf->vma_flags); if (!page) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; From d56126780c1e35d3f022d0c46be099262589e618 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:22 +0200 Subject: [PATCH 13/23] mm: introduce __page_add_new_anon_rmap() When dealing with speculative page fault handler, we may race with VMA being split or merged. In this case the vma->vm_start and vm->vm_end fields may not match the address the page fault is occurring. This can only happens when the VMA is split but in that case, the anon_vma pointer of the new VMA will be the same as the original one, because in __split_vma the new->anon_vma is set to src->anon_vma when *new = *vma. So even if the VMA boundaries are not correct, the anon_vma pointer is still valid. If the VMA has been merged, then the VMA in which it has been merged must have the same anon_vma pointer otherwise the merge can't be done. 
So in all the case we know that the anon_vma is valid, since we have checked before starting the speculative page fault that the anon_vma pointer is valid for this VMA and since there is an anon_vma this means that at one time a page has been backed and that before the VMA is cleaned, the page table lock would have to be grab to clean the PTE, and the anon_vma field is checked once the PTE is locked. This patch introduce a new __page_add_new_anon_rmap() service which doesn't check for the VMA boundaries, and create a new inline one which do the check. When called from a page fault handler, if this is not a speculative one, there is a guarantee that vm_start and vm_end match the faulting address, so this check is useless. In the context of the speculative page fault handler, this check may be wrong but anon_vma is still valid as explained above. Change-Id: I72c47830181579f8c9618df879077d321653b5f1 Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:22 [vinmenon@codeaurora.org: trivial merge conflict fixes + checkpatch fixes] Signed-off-by: Vinayak Menon --- include/linux/rmap.h | 12 ++++++++++-- mm/memory.c | 8 ++++---- mm/rmap.c | 5 ++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 4e5bf0925534..7268a54b8956 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -179,8 +179,16 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); +void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, + unsigned long address, bool compound); +static inline void page_add_new_anon_rmap(struct page *page, + struct vm_area_struct *vma, + unsigned long address, bool compound) +{ + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); + __page_add_new_anon_rmap(page, vma, address, compound); +} + void page_add_file_rmap(struct page *, bool); void page_remove_rmap(struct page *, bool); diff --git a/mm/memory.c b/mm/memory.c index 8136d0f55871..7b45b393d18e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2436,7 +2436,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * thread doing COW. 
*/ ptep_clear_flush_notify(vma, vmf->address, vmf->pte); - page_add_new_anon_rmap(new_page, vma, vmf->address, false); + __page_add_new_anon_rmap(new_page, vma, vmf->address, false); mem_cgroup_commit_charge(new_page, memcg, false, false); __lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags); /* @@ -2984,7 +2984,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(page != swapcache && swapcache)) { - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { @@ -3135,7 +3135,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) } inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); setpte: @@ -3405,7 +3405,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, /* copy-on-write page */ if (write && !(vmf->vma_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, vmf->address, false); + __page_add_new_anon_rmap(page, vma, vmf->address, false); mem_cgroup_commit_charge(page, memcg, false, false); __lru_cache_add_active_or_unevictable(page, vmf->vma_flags); } else { diff --git a/mm/rmap.c b/mm/rmap.c index c1cca1fd6706..fe4a3b2af774 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1138,7 +1138,7 @@ void do_page_add_anon_rmap(struct page *page, } /** - * page_add_new_anon_rmap - add pte mapping to a new anonymous page + * __page_add_new_anon_rmap - add pte mapping to a new anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped @@ -1148,12 +1148,11 @@ void do_page_add_anon_rmap(struct page *page, * This means the inc-and-test can be bypassed. * Page does not have to be locked. */ -void page_add_new_anon_rmap(struct page *page, +void __page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { int nr = compound ? hpage_nr_pages(page) : 1; - VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __SetPageSwapBacked(page); if (compound) { VM_BUG_ON_PAGE(!PageTransHuge(page), page); From 9db216b5c9b0f9199b904d1e8ccd72c36518607f Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:23 +0200 Subject: [PATCH 14/23] mm: protect mm_rb tree with a rwlock This change is inspired by the Peter's proposal patch [1] which was protecting the VMA using SRCU. Unfortunately, SRCU is not scaling well in that particular case, and it is introducing major performance degradation due to excessive scheduling operations. To allow access to the mm_rb tree without grabbing the mmap_sem, this patch is protecting it access using a rwlock. As the mm_rb tree is a O(log n) search it is safe to protect it using such a lock. The VMA cache is not protected by the new rwlock and it should not be used without holding the mmap_sem. To allow the picked VMA structure to be used once the rwlock is released, a use count is added to the VMA structure. When the VMA is allocated it is set to 1. Each time the VMA is picked with the rwlock held its use count is incremented. Each time the VMA is released it is decremented. 
When the use count hits zero, this means that the VMA is no more used and should be freed. This patch is preparing for 2 kind of VMA access : - as usual, under the control of the mmap_sem, - without holding the mmap_sem for the speculative page fault handler. Access done under the control the mmap_sem doesn't require to grab the rwlock to protect read access to the mm_rb tree, but access in write must be done under the protection of the rwlock too. This affects inserting and removing of elements in the RB tree. The patch is introducing 2 new functions: - vma_get() to find a VMA based on an address by holding the new rwlock. - vma_put() to release the VMA when its no more used. These services are designed to be used when access are made to the RB tree without holding the mmap_sem. When a VMA is removed from the RB tree, its vma->vm_rb field is cleared and we rely on the WMB done when releasing the rwlock to serialize the write with the RMB done in a later patch to check for the VMA's validity. When free_vma is called, the file associated with the VMA is closed immediately, but the policy and the file structure remained in used until the VMA's use count reach 0, which may happens later when exiting an in progress speculative page fault. [1] https://patchwork.kernel.org/patch/5108281/ Change-Id: I9ecc922b8efa4b28975cc6a8e9531284c24ac14e Cc: Peter Zijlstra (Intel) Cc: Matthew Wilcox Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:23 [vinmenon@codeaurora.org: fix the return of put_vma] Signed-off-by: Vinayak Menon Signed-off-by: Charan Teja Reddy --- include/linux/mm.h | 1 + include/linux/mm_types.h | 4 ++ kernel/fork.c | 3 ++ mm/init-mm.c | 3 ++ mm/internal.h | 6 +++ mm/mmap.c | 114 +++++++++++++++++++++++++++++---------- 6 files changed, 103 insertions(+), 28 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index d05ef12f6f6e..625509a67ff8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -534,6 +534,7 @@ static inline void INIT_VMA(struct vm_area_struct *vma) INIT_LIST_HEAD(&vma->anon_vma_chain); #ifdef CONFIG_SPECULATIVE_PAGE_FAULT seqcount_init(&vma->vm_sequence); + atomic_set(&vma->vm_ref_count, 1); #endif } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index f010d19c2b18..e1c2d68bccd9 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -363,6 +363,7 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_SPECULATIVE_PAGE_FAULT seqcount_t vm_sequence; + atomic_t vm_ref_count; /* see vma_get(), vma_put() */ #endif ANDROID_KABI_RESERVE(1); ANDROID_KABI_RESERVE(2); @@ -387,6 +388,9 @@ struct mm_struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; u64 vmacache_seqnum; /* per-thread vmacache */ +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + rwlock_t mm_rb_lock; +#endif #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/kernel/fork.c b/kernel/fork.c index 33496e0cdcff..611df503e361 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1017,6 +1017,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm->mmap = NULL; mm->mm_rb = RB_ROOT; mm->vmacache_seqnum = 0; +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + rwlock_init(&mm->mm_rb_lock); +#endif atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); init_rwsem(&mm->mmap_sem); diff --git a/mm/init-mm.c b/mm/init-mm.c index 19603302a77f..0a4abdcf8886 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -28,6 
+28,9 @@ */ struct mm_struct init_mm = { .mm_rb = RB_ROOT, +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + .mm_rb_lock = __RW_LOCK_UNLOCKED(init_mm.mm_rb_lock), +#endif .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), diff --git a/mm/internal.h b/mm/internal.h index 7dd7fbb577a9..10834044681e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,6 +36,12 @@ void page_writeback_init(void); vm_fault_t do_swap_page(struct vm_fault *vmf); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +extern struct vm_area_struct *get_vma(struct mm_struct *mm, + unsigned long addr); +extern void put_vma(struct vm_area_struct *vma); +#endif + void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/mmap.c b/mm/mmap.c index bdb0043fd935..0fcbe5a72e64 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -165,6 +165,27 @@ void unlink_file_vma(struct vm_area_struct *vma) } } +static void __free_vma(struct vm_area_struct *vma) +{ + if (vma->vm_file) + fput(vma->vm_file); + mpol_put(vma_policy(vma)); + vm_area_free(vma); +} + +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +void put_vma(struct vm_area_struct *vma) +{ + if (atomic_dec_and_test(&vma->vm_ref_count)) + __free_vma(vma); +} +#else +static inline void put_vma(struct vm_area_struct *vma) +{ + __free_vma(vma); +} +#endif + /* * Close a vm structure and free it, returning the next. */ @@ -175,10 +196,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) might_sleep(); if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); - if (vma->vm_file) - fput(vma->vm_file); - mpol_put(vma_policy(vma)); - vm_area_free(vma); + put_vma(vma); return next; } @@ -431,6 +449,13 @@ static void validate_mm(struct mm_struct *mm) RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks, struct vm_area_struct, vm_rb, unsigned long, rb_subtree_gap, vma_compute_gap) +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +#define mm_rb_write_lock(mm) write_lock(&(mm)->mm_rb_lock) +#define mm_rb_write_unlock(mm) write_unlock(&(mm)->mm_rb_lock) +#else +#define mm_rb_write_lock(mm) do { } while (0) +#define mm_rb_write_unlock(mm) do { } while (0) +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ /* * Update augmented rbtree rb_subtree_gap values after vma->vm_start or @@ -447,26 +472,37 @@ static void vma_gap_update(struct vm_area_struct *vma) } static inline void vma_rb_insert(struct vm_area_struct *vma, - struct rb_root *root) + struct mm_struct *mm) { + struct rb_root *root = &mm->mm_rb; + /* All rb_subtree_gap values must be consistent prior to insertion */ validate_mm_rb(root, NULL); rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); } -static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) +static void __vma_rb_erase(struct vm_area_struct *vma, struct mm_struct *mm) { + struct rb_root *root = &mm->mm_rb; /* * Note rb_erase_augmented is a fairly large inline function, * so make sure we instantiate it only once with our desired * augmented rbtree callbacks. */ + mm_rb_write_lock(mm); rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); + mm_rb_write_unlock(mm); /* wmb */ + + /* + * Ensure the removal is complete before clearing the node. + * Matched by vma_has_changed()/handle_speculative_fault(). 
+ */ + RB_CLEAR_NODE(&vma->vm_rb); } static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, - struct rb_root *root, + struct mm_struct *mm, struct vm_area_struct *ignore) { /* @@ -474,21 +510,21 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, * with the possible exception of the "next" vma being erased if * next->vm_start was reduced. */ - validate_mm_rb(root, ignore); + validate_mm_rb(&mm->mm_rb, ignore); - __vma_rb_erase(vma, root); + __vma_rb_erase(vma, mm); } static __always_inline void vma_rb_erase(struct vm_area_struct *vma, - struct rb_root *root) + struct mm_struct *mm) { /* * All rb_subtree_gap values must be consistent prior to erase, * with the possible exception of the vma being erased. */ - validate_mm_rb(root, vma); + validate_mm_rb(&mm->mm_rb, vma); - __vma_rb_erase(vma, root); + __vma_rb_erase(vma, mm); } /* @@ -603,10 +639,12 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, * immediately update the gap to the correct value. Finally we * rebalance the rbtree after all augmented values have been set. */ + mm_rb_write_lock(mm); rb_link_node(&vma->vm_rb, rb_parent, rb_link); vma->rb_subtree_gap = 0; vma_gap_update(vma); - vma_rb_insert(vma, &mm->mm_rb); + vma_rb_insert(vma, mm); + mm_rb_write_unlock(mm); } static void __vma_link_file(struct vm_area_struct *vma) @@ -682,7 +720,7 @@ static __always_inline void __vma_unlink_common(struct mm_struct *mm, { struct vm_area_struct *next; - vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); + vma_rb_erase_ignore(vma, mm, ignore); next = vma->vm_next; if (has_prev) prev->vm_next = next; @@ -959,16 +997,13 @@ again: } if (remove_next) { - if (file) { + if (file) uprobe_munmap(next, next->vm_start, next->vm_end); - fput(file); - } if (next->anon_vma) anon_vma_merge(vma, next); mm->map_count--; - mpol_put(vma_policy(next)); vm_raw_write_end(next); - vm_area_free(next); + put_vma(next); /* * In mprotect's case 6 (see comments on vma_merge), * we must remove another next too. It would clutter @@ -2284,15 +2319,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ -struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +static struct vm_area_struct *__find_vma(struct mm_struct *mm, + unsigned long addr) { struct rb_node *rb_node; - struct vm_area_struct *vma; - - /* Check the cache first. */ - vma = vmacache_find(mm, addr); - if (likely(vma)) - return vma; + struct vm_area_struct *vma = NULL; rb_node = mm->mm_rb.rb_node; @@ -2310,13 +2341,40 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) rb_node = rb_node->rb_right; } + return vma; +} + +struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma; + + /* Check the cache first. */ + vma = vmacache_find(mm, addr); + if (likely(vma)) + return vma; + + vma = __find_vma(mm, addr); if (vma) vmacache_update(addr, vma); return vma; } - EXPORT_SYMBOL(find_vma); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = NULL; + + read_lock(&mm->mm_rb_lock); + vma = __find_vma(mm, addr); + if (vma) + atomic_inc(&vma->vm_ref_count); + read_unlock(&mm->mm_rb_lock); + + return vma; +} +#endif + /* * Same as find_vma, but also return a pointer to the previous VMA in *pprev. 
*/ @@ -2684,7 +2742,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { - vma_rb_erase(vma, &mm->mm_rb); + vma_rb_erase(vma, mm); mm->map_count--; tail_vma = vma; vma = vma->vm_next; From 6e9deb2ea7ea1d63a5c1516c9acc1efeb182977e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Apr 2018 16:33:24 +0200 Subject: [PATCH 15/23] mm: provide speculative fault infrastructure Provide infrastructure to do a speculative fault (not holding mmap_sem). The not holding of mmap_sem means we can race against VMA change/removal and page-table destruction. We use the SRCU VMA freeing to keep the VMA around. We use the VMA seqcount to detect change (including umapping / page-table deletion) and we use gup_fast() style page-table walking to deal with page-table races. Once we've obtained the page and are ready to update the PTE, we validate if the state we started the fault with is still valid, if not, we'll fail the fault with VM_FAULT_RETRY, otherwise we update the PTE and we're done. Signed-off-by: Peter Zijlstra (Intel) [Manage the newly introduced pte_spinlock() for speculative page fault to fail if the VMA is touched in our back] [Rename vma_is_dead() to vma_has_changed() and declare it here] [Fetch p4d and pud] [Set vmd.sequence in __handle_mm_fault()] [Abort speculative path when handle_userfault() has to be called] [Add additional VMA's flags checks in handle_speculative_fault()] [Clear FAULT_FLAG_ALLOW_RETRY in handle_speculative_fault()] [Don't set vmf->pte and vmf->ptl if pte_map_lock() failed] [Remove warning comment about waiting for !seq&1 since we don't want to wait] [Remove warning about no huge page support, mention it explictly] [Don't call do_fault() in the speculative path as __do_fault() calls vma->vm_ops->fault() which may want to release mmap_sem] [Only vm_fault pointer argument for vma_has_changed()] [Fix check against huge page, calling pmd_trans_huge()] [Use READ_ONCE() when reading VMA's fields in the speculative path] [Explicitly check for __HAVE_ARCH_PTE_SPECIAL as we can't support for processing done in vm_normal_page()] [Check that vma->anon_vma is already set when starting the speculative path] [Check for memory policy as we can't support MPOL_INTERLEAVE case due to the processing done in mpol_misplaced()] [Don't support VMA growing up or down] [Move check on vm_sequence just before calling handle_pte_fault()] [Don't build SPF services if !CONFIG_SPECULATIVE_PAGE_FAULT] [Add mem cgroup oom check] [Use READ_ONCE to access p*d entries] [Replace deprecated ACCESS_ONCE() by READ_ONCE() in vma_has_changed()] [Don't fetch pte again in handle_pte_fault() when running the speculative path] [Check PMD against concurrent collapsing operation] [Try spin lock the pte during the speculative path to avoid deadlock with other CPU's invalidating the TLB and requiring this CPU to catch the inter processor's interrupt] [Move define of FAULT_FLAG_SPECULATIVE here] [Introduce __handle_speculative_fault() and add a check against mm->mm_users in handle_speculative_fault() defined in mm.h] Signed-off-by: Laurent Dufour Change-Id: I45cbe79a96c7aa234cace421a36550e7fb0b9368 Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:24 [vinmenon@codeaurora.org: fix !CONFIG_NUMA build + checkpatch fixes] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflicts] Signed-off-by: Charan Teja Reddy --- include/linux/hugetlb_inline.h | 2 +- include/linux/mm.h | 31 +++ 
include/linux/pagemap.h | 4 +- mm/internal.h | 16 +- mm/memory.c | 342 ++++++++++++++++++++++++++++++++- 5 files changed, 388 insertions(+), 7 deletions(-) diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 0660a03d37d9..9e25283d6fc9 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h @@ -8,7 +8,7 @@ static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma) { - return !!(vma->vm_flags & VM_HUGETLB); + return !!(READ_ONCE(vma->vm_flags) & VM_HUGETLB); } #else diff --git a/include/linux/mm.h b/include/linux/mm.h index 625509a67ff8..2cae596a0f2c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -393,6 +393,8 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */ #define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */ #define FAULT_FLAG_PREFAULT_OLD 0x400 /* Make faultaround ptes old */ +/* Speculative fault, not holding mmap_sem */ +#define FAULT_FLAG_SPECULATIVE 0x200 #define FAULT_FLAG_TRACE \ { FAULT_FLAG_WRITE, "WRITE" }, \ @@ -421,6 +423,10 @@ struct vm_fault { gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ unsigned long address; /* Faulting virtual address */ +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + unsigned int sequence; + pmd_t orig_pmd; /* value of PMD at the time of fault */ +#endif pmd_t *pmd; /* Pointer to pmd entry matching * the 'address' */ pud_t *pud; /* Pointer to pud entry matching @@ -1534,6 +1540,31 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags); + +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +extern int __handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags); +static inline int handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags) +{ + /* + * Try speculative page fault for multithreaded user space task only. 
+ */ + if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) + return VM_FAULT_RETRY; + return __handle_speculative_fault(mm, address, flags); +} +#else +static inline int handle_speculative_fault(struct mm_struct *mm, + unsigned long address, + unsigned int flags) +{ + return VM_FAULT_RETRY; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 37a4d9e32cd3..4344a868ad07 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -450,8 +450,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, pgoff_t pgoff; if (unlikely(is_vm_hugetlb_page(vma))) return linear_hugepage_index(vma, address); - pgoff = (address - vma->vm_start) >> PAGE_SHIFT; - pgoff += vma->vm_pgoff; + pgoff = (address - READ_ONCE(vma->vm_start)) >> PAGE_SHIFT; + pgoff += READ_ONCE(vma->vm_pgoff); return pgoff; } diff --git a/mm/internal.h b/mm/internal.h index 10834044681e..d5dd3549559d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -40,7 +40,21 @@ vm_fault_t do_swap_page(struct vm_fault *vmf); extern struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr); extern void put_vma(struct vm_area_struct *vma); -#endif + +static inline bool vma_has_changed(struct vm_fault *vmf) +{ + int ret = RB_EMPTY_NODE(&vmf->vma->vm_rb); + unsigned int seq = READ_ONCE(vmf->vma->vm_sequence.sequence); + + /* + * Matches both the wmb in write_seqlock_{begin,end}() and + * the wmb in vma_rb_erase(). + */ + smp_rmb(); + + return ret || seq != vmf->sequence; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/memory.c b/mm/memory.c index 7b45b393d18e..7ce0f8d5565c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -542,7 +542,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, if (page) dump_page(page, "bad pte"); pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n", - (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); + (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index); pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n", vma->vm_file, vma->vm_ops ? vma->vm_ops->fault : NULL, @@ -2152,6 +2152,113 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL_GPL(apply_to_page_range); +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT +static bool pte_spinlock(struct vm_fault *vmf) +{ + bool ret = false; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t pmdval; +#endif + + /* Check if vma is still valid */ + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + spin_lock(vmf->ptl); + return true; + } + + local_irq_disable(); + if (vma_has_changed(vmf)) + goto out; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * We check if the pmd value is still the same to ensure that there + * is not a huge collapse operation in progress in our back. 
+ */ + pmdval = READ_ONCE(*vmf->pmd); + if (!pmd_same(pmdval, vmf->orig_pmd)) + goto out; +#endif + + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + if (unlikely(!spin_trylock(vmf->ptl))) + goto out; + + if (vma_has_changed(vmf)) { + spin_unlock(vmf->ptl); + goto out; + } + + ret = true; +out: + local_irq_enable(); + return ret; +} + +static bool pte_map_lock(struct vm_fault *vmf) +{ + bool ret = false; + pte_t *pte; + spinlock_t *ptl; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t pmdval; +#endif + + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + return true; + } + + /* + * The first vma_has_changed() guarantees the page-tables are still + * valid, having IRQs disabled ensures they stay around, hence the + * second vma_has_changed() to make sure they are still valid once + * we've got the lock. After that a concurrent zap_pte_range() will + * block on the PTL and thus we're safe. + */ + local_irq_disable(); + if (vma_has_changed(vmf)) + goto out; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* + * We check if the pmd value is still the same to ensure that there + * is not a huge collapse operation in progress in our back. + */ + pmdval = READ_ONCE(*vmf->pmd); + if (!pmd_same(pmdval, vmf->orig_pmd)) + goto out; +#endif + + /* + * Same as pte_offset_map_lock() except that we call + * spin_trylock() in place of spin_lock() to avoid race with + * unmap path which may have the lock and wait for this CPU + * to invalidate TLB but this CPU has irq disabled. + * Since we are in a speculative patch, accept it could fail + */ + ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); + pte = pte_offset_map(vmf->pmd, vmf->address); + if (unlikely(!spin_trylock(ptl))) { + pte_unmap(pte); + goto out; + } + + if (vma_has_changed(vmf)) { + pte_unmap_unlock(pte, ptl); + goto out; + } + + vmf->pte = pte; + vmf->ptl = ptl; + ret = true; +out: + local_irq_enable(); + return ret; +} +#else static inline bool pte_spinlock(struct vm_fault *vmf) { vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); @@ -2165,6 +2272,7 @@ static inline bool pte_map_lock(struct vm_fault *vmf) vmf->address, &vmf->ptl); return true; } +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ /* * handle_pte_fault chooses page fault handler according to an entry which was @@ -3085,6 +3193,14 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) ret = check_stable_address_space(vma->vm_mm); if (ret) goto unlock; + /* + * Don't call the userfaultfd during the speculative path. + * We already checked for the VMA to not be managed through + * userfaultfd, but it may be set in our back once we have lock + * the pte. In such a case we can ignore it this time. 
+ */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + goto setpte; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -3127,7 +3243,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) goto unlock_and_release; /* Deliver the page fault to userland, check inside PT lock */ - if (userfaultfd_missing(vma)) { + if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) && + userfaultfd_missing(vma)) { pte_unmap_unlock(vmf->pte, vmf->ptl); mem_cgroup_cancel_charge(page, memcg, false); put_page(page); @@ -3913,6 +4030,15 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) pte_t entry; if (unlikely(pmd_none(*vmf->pmd))) { + /* + * In the case of the speculative page fault handler we abort + * the speculative path immediately as the pmd is probably + * in the way to be converted in a huge one. We will try + * again holding the mmap_sem (which implies that the collapse + * operation is done). + */ + if (vmf->flags & FAULT_FLAG_SPECULATIVE) + return VM_FAULT_RETRY; /* * Leave __pte_alloc() until later: because vm_ops->fault may * want to allocate huge page, and if we expose page table @@ -3920,7 +4046,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) * concurrent faults and from rmap lookups. */ vmf->pte = NULL; - } else { + } else if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) { /* See comment in pte_alloc_one_map() */ if (pmd_devmap_trans_unstable(vmf->pmd)) return 0; @@ -3929,6 +4055,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) * pmd from under us anymore at this point because we hold the * mmap_sem read mode and khugepaged takes it in write mode. * So now it's safe to run pte_offset_map(). + * This is not applicable to the speculative page fault handler + * but in that case, the pte is fetched earlier in + * handle_speculative_fault(). */ vmf->pte = pte_offset_map(vmf->pmd, vmf->address); vmf->orig_pte = *vmf->pte; @@ -3951,6 +4080,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (!vmf->pte) { if (vma_is_anonymous(vmf->vma)) return do_anonymous_page(vmf); + else if (vmf->flags & FAULT_FLAG_SPECULATIVE) + return VM_FAULT_RETRY; else return do_fault(vmf); } @@ -4048,6 +4179,10 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, vmf.pmd = pmd_alloc(mm, vmf.pud, address); if (!vmf.pmd) return VM_FAULT_OOM; + +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + vmf.sequence = raw_read_seqcount(&vma->vm_sequence); +#endif if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) @@ -4081,6 +4216,207 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return handle_pte_fault(&vmf); } +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + +#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL +/* This is required by vm_normal_page() */ +#error "Speculative page fault handler requires CONFIG_ARCH_HAS_PTE_SPECIAL" +#endif + +/* + * vm_normal_page() adds some processing which should be done while + * hodling the mmap_sem. 
+ */ +int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, + unsigned int flags) +{ + struct vm_fault vmf = { + .address = address, + }; + pgd_t *pgd, pgdval; + p4d_t *p4d, p4dval; + pud_t pudval; + int seq, ret = VM_FAULT_RETRY; + struct vm_area_struct *vma; + + /* Clear flags that may lead to release the mmap_sem to retry */ + flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE); + flags |= FAULT_FLAG_SPECULATIVE; + + vma = get_vma(mm, address); + if (!vma) + return ret; + + /* rmb <-> seqlock,vma_rb_erase() */ + seq = raw_read_seqcount(&vma->vm_sequence); + if (seq & 1) + goto out_put; + + /* + * Can't call vm_ops service has we don't know what they would do + * with the VMA. + * This include huge page from hugetlbfs. + */ + if (vma->vm_ops) + goto out_put; + + /* + * __anon_vma_prepare() requires the mmap_sem to be held + * because vm_next and vm_prev must be safe. This can't be guaranteed + * in the speculative path. + */ + if (unlikely(!vma->anon_vma)) + goto out_put; + + vmf.vma_flags = READ_ONCE(vma->vm_flags); + vmf.vma_page_prot = READ_ONCE(vma->vm_page_prot); + + /* Can't call userland page fault handler in the speculative path */ + if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) + goto out_put; + + if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) + /* + * This could be detected by the check address against VMA's + * boundaries but we want to trace it as not supported instead + * of changed. + */ + goto out_put; + + if (address < READ_ONCE(vma->vm_start) + || READ_ONCE(vma->vm_end) <= address) + goto out_put; + + if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, + flags & FAULT_FLAG_INSTRUCTION, + flags & FAULT_FLAG_REMOTE)) { + ret = VM_FAULT_SIGSEGV; + goto out_put; + } + + /* This is one is required to check that the VMA has write access set */ + if (flags & FAULT_FLAG_WRITE) { + if (unlikely(!(vmf.vma_flags & VM_WRITE))) { + ret = VM_FAULT_SIGSEGV; + goto out_put; + } + } else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE)))) { + ret = VM_FAULT_SIGSEGV; + goto out_put; + } + +#ifdef CONFIG_NUMA + struct mempolicy *pol; + + /* + * MPOL_INTERLEAVE implies additional checks in + * mpol_misplaced() which are not compatible with the + *speculative page fault processing. + */ + pol = __get_vma_policy(vma, address); + if (!pol) + pol = get_task_policy(current); + if (pol && pol->mode == MPOL_INTERLEAVE) + goto out_put; +#endif + + /* + * Do a speculative lookup of the PTE entry. + */ + local_irq_disable(); + pgd = pgd_offset(mm, address); + pgdval = READ_ONCE(*pgd); + if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval))) + goto out_walk; + + p4d = p4d_offset(pgd, address); + p4dval = READ_ONCE(*p4d); + if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval))) + goto out_walk; + + vmf.pud = pud_offset(p4d, address); + pudval = READ_ONCE(*vmf.pud); + if (pud_none(pudval) || unlikely(pud_bad(pudval))) + goto out_walk; + + /* Huge pages at PUD level are not supported. */ + if (unlikely(pud_trans_huge(pudval))) + goto out_walk; + + vmf.pmd = pmd_offset(vmf.pud, address); + vmf.orig_pmd = READ_ONCE(*vmf.pmd); + /* + * pmd_none could mean that a hugepage collapse is in progress + * in our back as collapse_huge_page() mark it before + * invalidating the pte (which is done once the IPI is catched + * by all CPU and we have interrupt disabled). + * For this reason we cannot handle THP in a speculative way since we + * can't safely indentify an in progress collapse operation done in our + * back on that PMD. 
+ * Regarding the order of the following checks, see comment in + * pmd_devmap_trans_unstable() + */ + if (unlikely(pmd_devmap(vmf.orig_pmd) || + pmd_none(vmf.orig_pmd) || pmd_trans_huge(vmf.orig_pmd) || + is_swap_pmd(vmf.orig_pmd))) + goto out_walk; + + /* + * The above does not allocate/instantiate page-tables because doing so + * would lead to the possibility of instantiating page-tables after + * free_pgtables() -- and consequently leaking them. + * + * The result is that we take at least one !speculative fault per PMD + * in order to instantiate it. + */ + + vmf.pte = pte_offset_map(vmf.pmd, address); + vmf.orig_pte = READ_ONCE(*vmf.pte); + barrier(); /* See comment in handle_pte_fault() */ + if (pte_none(vmf.orig_pte)) { + pte_unmap(vmf.pte); + vmf.pte = NULL; + } + + vmf.vma = vma; + vmf.pgoff = linear_page_index(vma, address); + vmf.gfp_mask = __get_fault_gfp_mask(vma); + vmf.sequence = seq; + vmf.flags = flags; + + local_irq_enable(); + + /* + * We need to re-validate the VMA after checking the bounds, otherwise + * we might have a false positive on the bounds. + */ + if (read_seqcount_retry(&vma->vm_sequence, seq)) + goto out_put; + + mem_cgroup_enter_user_fault(); + ret = handle_pte_fault(&vmf); + mem_cgroup_exit_user_fault(); + + put_vma(vma); + + /* + * The task may have entered a memcg OOM situation but + * if the allocation error was handled gracefully (no + * VM_FAULT_OOM), there is no need to kill anything. + * Just clean up the OOM state peacefully. + */ + if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) + mem_cgroup_oom_synchronize(false); + return ret; + +out_walk: + local_irq_enable(); +out_put: + put_vma(vma); + return ret; +} +#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ + /* * By the time we get here, we already hold the mm semaphore * From 52e2e1f805cb0136bdd6664817204e18bed95637 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:25 +0200 Subject: [PATCH 16/23] mm: adding speculative page fault failure trace events This patch a set of new trace events to collect the speculative page fault event failures. 
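These events can be consumed through tracefs once the kernel is built with this
patch. As an illustration only (not part of this series), the small userspace
program below enables the new "pagefault" event group and streams the matching
lines from trace_pipe; it assumes tracefs is mounted at /sys/kernel/tracing
(older setups expose it under /sys/kernel/debug/tracing):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[512];
	FILE *pipe;

	/* Enable every spf_* event in the "pagefault" group. */
	if (write_str("/sys/kernel/tracing/events/pagefault/enable", "1"))
		return EXIT_FAILURE;

	/* Each event line follows TP_printk(): "ip:%lx vma:%lx-%lx address:%lx". */
	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe)
		return EXIT_FAILURE;
	while (fgets(line, sizeof(line), pipe))
		if (strstr(line, "spf_"))
			fputs(line, stdout);
	fclose(pipe);
	return 0;
}

The filter on "spf_" simply keys on the event names defined above
(spf_vma_changed, spf_vma_notsup, spf_pte_lock, ...), so the reader picks up
only the speculative page fault failure events.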
Signed-off-by: Laurent Dufour Change-Id: I9dafba293edf40bdad4ae241d105ecdfb42579c1 Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:25 [vinmenon@codeaurora.org: trivial merge conflicts] Signed-off-by: Vinayak Menon --- include/trace/events/pagefault.h | 88 ++++++++++++++++++++++++++++++++ mm/memory.c | 65 ++++++++++++++++++----- 2 files changed, 139 insertions(+), 14 deletions(-) create mode 100644 include/trace/events/pagefault.h diff --git a/include/trace/events/pagefault.h b/include/trace/events/pagefault.h new file mode 100644 index 000000000000..a9643b3759f2 --- /dev/null +++ b/include/trace/events/pagefault.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM pagefault + +#if !defined(_TRACE_PAGEFAULT_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_PAGEFAULT_H + +#include +#include + +DECLARE_EVENT_CLASS(spf, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, vm_start) + __field(unsigned long, vm_end) + __field(unsigned long, address) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->vm_start = vma->vm_start; + __entry->vm_end = vma->vm_end; + __entry->address = address; + ), + + TP_printk("ip:%lx vma:%lx-%lx address:%lx", + __entry->caller, __entry->vm_start, __entry->vm_end, + __entry->address) +); + +DEFINE_EVENT(spf, spf_pte_lock, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_changed, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_noanon, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_notsup, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_vma_access, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +DEFINE_EVENT(spf, spf_pmd_changed, + + TP_PROTO(unsigned long caller, + struct vm_area_struct *vma, unsigned long address), + + TP_ARGS(caller, vma, address) +); + +#endif /* _TRACE_PAGEFAULT_H */ + +/* This part must be outside protection */ +#include diff --git a/mm/memory.c b/mm/memory.c index 7ce0f8d5565c..02b85b22c3e6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -84,6 +84,9 @@ #include "internal.h" +#define CREATE_TRACE_POINTS +#include + #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif @@ -2168,8 +2171,10 @@ static bool pte_spinlock(struct vm_fault *vmf) } local_irq_disable(); - if (vma_has_changed(vmf)) + if (vma_has_changed(vmf)) { + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); goto out; + } #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* @@ -2177,16 +2182,21 @@ static bool pte_spinlock(struct vm_fault *vmf) * is not a huge collapse operation in progress in our back. 
*/ pmdval = READ_ONCE(*vmf->pmd); - if (!pmd_same(pmdval, vmf->orig_pmd)) + if (!pmd_same(pmdval, vmf->orig_pmd)) { + trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); goto out; + } #endif vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - if (unlikely(!spin_trylock(vmf->ptl))) + if (unlikely(!spin_trylock(vmf->ptl))) { + trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); goto out; + } if (vma_has_changed(vmf)) { spin_unlock(vmf->ptl); + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); goto out; } @@ -2219,8 +2229,10 @@ static bool pte_map_lock(struct vm_fault *vmf) * block on the PTL and thus we're safe. */ local_irq_disable(); - if (vma_has_changed(vmf)) + if (vma_has_changed(vmf)) { + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); goto out; + } #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* @@ -2228,8 +2240,10 @@ static bool pte_map_lock(struct vm_fault *vmf) * is not a huge collapse operation in progress in our back. */ pmdval = READ_ONCE(*vmf->pmd); - if (!pmd_same(pmdval, vmf->orig_pmd)) + if (!pmd_same(pmdval, vmf->orig_pmd)) { + trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address); goto out; + } #endif /* @@ -2243,11 +2257,13 @@ static bool pte_map_lock(struct vm_fault *vmf) pte = pte_offset_map(vmf->pmd, vmf->address); if (unlikely(!spin_trylock(ptl))) { pte_unmap(pte); + trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address); goto out; } if (vma_has_changed(vmf)) { pte_unmap_unlock(pte, ptl); + trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address); goto out; } @@ -4249,47 +4265,60 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, /* rmb <-> seqlock,vma_rb_erase() */ seq = raw_read_seqcount(&vma->vm_sequence); - if (seq & 1) + if (seq & 1) { + trace_spf_vma_changed(_RET_IP_, vma, address); goto out_put; + } /* * Can't call vm_ops service has we don't know what they would do * with the VMA. * This include huge page from hugetlbfs. */ - if (vma->vm_ops) + if (vma->vm_ops) { + trace_spf_vma_notsup(_RET_IP_, vma, address); goto out_put; + } /* * __anon_vma_prepare() requires the mmap_sem to be held * because vm_next and vm_prev must be safe. This can't be guaranteed * in the speculative path. */ - if (unlikely(!vma->anon_vma)) + if (unlikely(!vma->anon_vma)) { + trace_spf_vma_notsup(_RET_IP_, vma, address); goto out_put; + } vmf.vma_flags = READ_ONCE(vma->vm_flags); vmf.vma_page_prot = READ_ONCE(vma->vm_page_prot); /* Can't call userland page fault handler in the speculative path */ - if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) + if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) { + trace_spf_vma_notsup(_RET_IP_, vma, address); goto out_put; + } - if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) + if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) { /* * This could be detected by the check address against VMA's * boundaries but we want to trace it as not supported instead * of changed. 
*/ + trace_spf_vma_notsup(_RET_IP_, vma, address); goto out_put; + } if (address < READ_ONCE(vma->vm_start) - || READ_ONCE(vma->vm_end) <= address) + || READ_ONCE(vma->vm_end) <= address) { + trace_spf_vma_changed(_RET_IP_, vma, address); goto out_put; + } if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, flags & FAULT_FLAG_INSTRUCTION, flags & FAULT_FLAG_REMOTE)) { + trace_spf_vma_access(_RET_IP_, vma, address); ret = VM_FAULT_SIGSEGV; goto out_put; } @@ -4297,10 +4326,12 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, /* This is one is required to check that the VMA has write access set */ if (flags & FAULT_FLAG_WRITE) { if (unlikely(!(vmf.vma_flags & VM_WRITE))) { + trace_spf_vma_access(_RET_IP_, vma, address); ret = VM_FAULT_SIGSEGV; goto out_put; } } else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE)))) { + trace_spf_vma_access(_RET_IP_, vma, address); ret = VM_FAULT_SIGSEGV; goto out_put; } @@ -4316,8 +4347,11 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, pol = __get_vma_policy(vma, address); if (!pol) pol = get_task_policy(current); - if (pol && pol->mode == MPOL_INTERLEAVE) - goto out_put; + if (!pol) + if (pol && pol->mode == MPOL_INTERLEAVE) { + trace_spf_vma_notsup(_RET_IP_, vma, address); + goto out_put; + } #endif /* @@ -4390,8 +4424,10 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * We need to re-validate the VMA after checking the bounds, otherwise * we might have a false positive on the bounds. */ - if (read_seqcount_retry(&vma->vm_sequence, seq)) + if (read_seqcount_retry(&vma->vm_sequence, seq)) { + trace_spf_vma_changed(_RET_IP_, vma, address); goto out_put; + } mem_cgroup_enter_user_fault(); ret = handle_pte_fault(&vmf); @@ -4410,6 +4446,7 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, return ret; out_walk: + trace_spf_vma_notsup(_RET_IP_, vma, address); local_irq_enable(); out_put: put_vma(vma); From f556cd74a7ea038c67812b0d9b5d971d03da5372 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:28 +0200 Subject: [PATCH 17/23] mm: speculative page fault handler return VMA When the speculative page fault handler is returning VM_RETRY, there is a chance that VMA fetched without grabbing the mmap_sem can be reused by the legacy page fault handler. By reusing it, we avoid calling find_vma() again. To achieve, that we must ensure that the VMA structure will not be freed in our back. This is done by getting the reference on it (get_vma()) and by assuming that the caller will call the new service can_reuse_spf_vma() once it has grabbed the mmap_sem. can_reuse_spf_vma() is first checking that the VMA is still in the RB tree , and then that the VMA's boundaries matched the passed address and release the reference on the VMA so that it can be freed if needed. In the case the VMA is freed, can_reuse_spf_vma() will have returned false as the VMA is no more in the RB tree. In the architecture page fault handler, the call to the new service reuse_spf_or_find_vma() should be made in place of find_vma(), this will handle the check on the spf_vma and if needed call find_vma(). 
Signed-off-by: Laurent Dufour Change-Id: Ia56dcf807e8bddf6788fd696dd80372db35476f0 Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:28 [vinmenon@codeaurora.org: trivial merge conflicts] Signed-off-by: Vinayak Menon --- include/linux/mm.h | 22 +++++-- mm/memory.c | 139 +++++++++++++++++++++++++++------------------ 2 files changed, 102 insertions(+), 59 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 2cae596a0f2c..11f87cbe4427 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1544,25 +1544,37 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, #ifdef CONFIG_SPECULATIVE_PAGE_FAULT extern int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, - unsigned int flags); + unsigned int flags, + struct vm_area_struct **vma); static inline int handle_speculative_fault(struct mm_struct *mm, unsigned long address, - unsigned int flags) + unsigned int flags, + struct vm_area_struct **vma) { /* * Try speculative page fault for multithreaded user space task only. */ - if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) + if (!(flags & FAULT_FLAG_USER) || atomic_read(&mm->mm_users) == 1) { + *vma = NULL; return VM_FAULT_RETRY; - return __handle_speculative_fault(mm, address, flags); + } + return __handle_speculative_fault(mm, address, flags, vma); } +extern bool can_reuse_spf_vma(struct vm_area_struct *vma, + unsigned long address); #else static inline int handle_speculative_fault(struct mm_struct *mm, unsigned long address, - unsigned int flags) + unsigned int flags, + struct vm_area_struct **vma) { return VM_FAULT_RETRY; } +static inline bool can_reuse_spf_vma(struct vm_area_struct *vma, + unsigned long address) +{ + return false; +} #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */ extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, diff --git a/mm/memory.c b/mm/memory.c index 02b85b22c3e6..534ed3bde700 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4238,13 +4238,22 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, /* This is required by vm_normal_page() */ #error "Speculative page fault handler requires CONFIG_ARCH_HAS_PTE_SPECIAL" #endif - /* * vm_normal_page() adds some processing which should be done while * hodling the mmap_sem. */ + +/* + * Tries to handle the page fault in a speculative way, without grabbing the + * mmap_sem. + * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must + * be checked later when the mmap_sem has been grabbed by calling + * can_reuse_spf_vma(). + * This is needed as the returned vma is kept in memory until the call to + * can_reuse_spf_vma() is made. 
+ */ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, - unsigned int flags) + unsigned int flags, struct vm_area_struct **vma) { struct vm_fault vmf = { .address = address, @@ -4252,22 +4261,22 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, pgd_t *pgd, pgdval; p4d_t *p4d, p4dval; pud_t pudval; - int seq, ret = VM_FAULT_RETRY; - struct vm_area_struct *vma; + int seq, ret; /* Clear flags that may lead to release the mmap_sem to retry */ flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE); flags |= FAULT_FLAG_SPECULATIVE; - vma = get_vma(mm, address); - if (!vma) - return ret; + *vma = get_vma(mm, address); + if (!*vma) + return VM_FAULT_RETRY; + vmf.vma = *vma; /* rmb <-> seqlock,vma_rb_erase() */ - seq = raw_read_seqcount(&vma->vm_sequence); + seq = raw_read_seqcount(&vmf.vma->vm_sequence); if (seq & 1) { - trace_spf_vma_changed(_RET_IP_, vma, address); - goto out_put; + trace_spf_vma_changed(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } /* @@ -4275,9 +4284,9 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * with the VMA. * This include huge page from hugetlbfs. */ - if (vma->vm_ops) { - trace_spf_vma_notsup(_RET_IP_, vma, address); - goto out_put; + if (vmf.vma->vm_ops) { + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } /* @@ -4285,18 +4294,18 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * because vm_next and vm_prev must be safe. This can't be guaranteed * in the speculative path. */ - if (unlikely(!vma->anon_vma)) { - trace_spf_vma_notsup(_RET_IP_, vma, address); - goto out_put; + if (unlikely(!vmf.vma->anon_vma)) { + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } - vmf.vma_flags = READ_ONCE(vma->vm_flags); - vmf.vma_page_prot = READ_ONCE(vma->vm_page_prot); + vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags); + vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot); /* Can't call userland page fault handler in the speculative path */ if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) { - trace_spf_vma_notsup(_RET_IP_, vma, address); - goto out_put; + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) { @@ -4305,36 +4314,27 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * boundaries but we want to trace it as not supported instead * of changed. 
*/ - trace_spf_vma_notsup(_RET_IP_, vma, address); - goto out_put; + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } - if (address < READ_ONCE(vma->vm_start) - || READ_ONCE(vma->vm_end) <= address) { - trace_spf_vma_changed(_RET_IP_, vma, address); - goto out_put; + if (address < READ_ONCE(vmf.vma->vm_start) + || READ_ONCE(vmf.vma->vm_end) <= address) { + trace_spf_vma_changed(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } - if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, + if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE, flags & FAULT_FLAG_INSTRUCTION, - flags & FAULT_FLAG_REMOTE)) { - trace_spf_vma_access(_RET_IP_, vma, address); - ret = VM_FAULT_SIGSEGV; - goto out_put; - } + flags & FAULT_FLAG_REMOTE)) + goto out_segv; /* This is one is required to check that the VMA has write access set */ if (flags & FAULT_FLAG_WRITE) { - if (unlikely(!(vmf.vma_flags & VM_WRITE))) { - trace_spf_vma_access(_RET_IP_, vma, address); - ret = VM_FAULT_SIGSEGV; - goto out_put; - } - } else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE)))) { - trace_spf_vma_access(_RET_IP_, vma, address); - ret = VM_FAULT_SIGSEGV; - goto out_put; - } + if (unlikely(!(vmf.vma_flags & VM_WRITE))) + goto out_segv; + } else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE)))) + goto out_segv; #ifdef CONFIG_NUMA struct mempolicy *pol; @@ -4344,13 +4344,13 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * mpol_misplaced() which are not compatible with the *speculative page fault processing. */ - pol = __get_vma_policy(vma, address); + pol = __get_vma_policy(vmf.vma, address); if (!pol) pol = get_task_policy(current); if (!pol) if (pol && pol->mode == MPOL_INTERLEAVE) { - trace_spf_vma_notsup(_RET_IP_, vma, address); - goto out_put; + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } #endif @@ -4412,9 +4412,8 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, vmf.pte = NULL; } - vmf.vma = vma; - vmf.pgoff = linear_page_index(vma, address); - vmf.gfp_mask = __get_fault_gfp_mask(vma); + vmf.pgoff = linear_page_index(vmf.vma, address); + vmf.gfp_mask = __get_fault_gfp_mask(vmf.vma); vmf.sequence = seq; vmf.flags = flags; @@ -4424,16 +4423,22 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * We need to re-validate the VMA after checking the bounds, otherwise * we might have a false positive on the bounds. */ - if (read_seqcount_retry(&vma->vm_sequence, seq)) { - trace_spf_vma_changed(_RET_IP_, vma, address); - goto out_put; + if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) { + trace_spf_vma_changed(_RET_IP_, vmf.vma, address); + return VM_FAULT_RETRY; } mem_cgroup_enter_user_fault(); ret = handle_pte_fault(&vmf); mem_cgroup_exit_user_fault(); - put_vma(vma); + /* + * If there is no need to retry, don't return the vma to the caller. + */ + if (ret != VM_FAULT_RETRY) { + put_vma(vmf.vma); + *vma = NULL; + } /* * The task may have entered a memcg OOM situation but @@ -4446,9 +4451,35 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, return ret; out_walk: - trace_spf_vma_notsup(_RET_IP_, vma, address); + trace_spf_vma_notsup(_RET_IP_, vmf.vma, address); local_irq_enable(); -out_put: + return VM_FAULT_RETRY; + +out_segv: + trace_spf_vma_access(_RET_IP_, vmf.vma, address); + /* + * We don't return VM_FAULT_RETRY so the caller is not expected to + * retrieve the fetched VMA. 
+ */ + put_vma(vmf.vma); + *vma = NULL; + return VM_FAULT_SIGSEGV; +} + +/* + * This is used to know if the vma fetch in the speculative page fault handler + * is still valid when trying the regular fault path while holding the + * mmap_sem. + * The call to put_vma(vma) must be made after checking the vma's fields, as + * the vma may be freed by put_vma(). In such a case it is expected that false + * is returned. + */ +bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address) +{ + bool ret; + + ret = !RB_EMPTY_NODE(&vma->vm_rb) && + vma->vm_start <= address && address < vma->vm_end; put_vma(vma); return ret; } From 2842d9234e61dc7ae96500caa4ad48320ba60147 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Tue, 17 Apr 2018 16:33:29 +0200 Subject: [PATCH 18/23] mm: add speculative page fault vmstats Add speculative_pgfault vmstat counter to count successful speculative page fault handling. Also fixing a minor typo in include/linux/vm_event_item.h. Change-Id: I0d3f3dc5195e1156d4b8edf83aff9d8d85904e8e Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Tue, 17 Apr 2018 16:33:29 Signed-off-by: Vinayak Menon --- include/linux/vm_event_item.h | 3 +++ mm/memory.c | 1 + mm/vmstat.c | 5 ++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index d29dd5fef8d2..e92926552bba 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -113,6 +113,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, +#endif +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + SPECULATIVE_PGFAULT, #endif NR_VM_EVENT_ITEMS }; diff --git a/mm/memory.c b/mm/memory.c index 534ed3bde700..f16ff231fc13 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4436,6 +4436,7 @@ int __handle_speculative_fault(struct mm_struct *mm, unsigned long address, * If there is no need to retry, don't return the vma to the caller. */ if (ret != VM_FAULT_RETRY) { + count_vm_event(SPECULATIVE_PGFAULT); put_vma(vmf.vma); *vma = NULL; } diff --git a/mm/vmstat.c b/mm/vmstat.c index ee82ac8523bb..149e7d2dc583 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1297,7 +1297,10 @@ const char * const vmstat_text[] = { "swap_ra", "swap_ra_hit", #endif -#endif /* CONFIG_VM_EVENTS_COUNTERS */ +#ifdef CONFIG_SPECULATIVE_PAGE_FAULT + "speculative_pgfault" +#endif +#endif /* CONFIG_VM_EVENT_COUNTERS */ }; #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ From bcca0756b31719a2d969642d65e12136f490c5f2 Mon Sep 17 00:00:00 2001 From: Mahendran Ganesh Date: Fri, 4 May 2018 14:57:48 +0800 Subject: [PATCH 19/23] arm64/mm: define ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT Set ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT for arm64. This enables Speculative Page Fault handler. Change-Id: Ic55bd1993b3af78ddac25867c5f422861079bb27 Signed-off-by: Ganesh Mahendran Patch-mainline: linux-mm @ Wed, 2 May 2018 15:54:31 Signed-off-by: Vinayak Menon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 34ea2002c4b8..c7ad4e0df464 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -189,6 +189,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT help ARM 64-bit (AArch64) Linux support. 
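With the speculative_pgfault counter from the previous patch exported through /proc/vmstat, and the arm64 enablement above, the simplest way to confirm that faults are being handled speculatively is to read that file (for example, grep speculative_pgfault /proc/vmstat). The userspace snippet below is purely illustrative and not part of the series:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/*
		 * Print the speculative_pgfault line; the counter is present
		 * only when CONFIG_SPECULATIVE_PAGE_FAULT is enabled.
		 */
		FILE *f = fopen("/proc/vmstat", "r");
		char line[256];

		if (!f) {
			perror("/proc/vmstat");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "speculative_pgfault",
				     strlen("speculative_pgfault")))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}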
From ad3023acf917f89266afa98f0576fa5e3e187f9f Mon Sep 17 00:00:00 2001 From: Mahendran Ganesh Date: Fri, 4 May 2018 14:57:49 +0800 Subject: [PATCH 20/23] arm64/mm: add speculative page fault This patch enables the speculative page fault on the arm64 architecture. I completed spf porting in 4.9. From the test result, we can see app launching time improved by about 10% in average. For the apps which have more than 50 threads, 15% or even more improvement can be got. Signed-off-by: Ganesh Mahendran Change-Id: Ib7c8b2e354800b5023e6c6400448a6d40aaf89c8 Patch-mainline: linux-mm @ Wed, 2 May 2018 15:54:32 [vinmenon@codeaurora.org: remove the speculative fault perf counter] Signed-off-by: Vinayak Menon [charante@codeaurora.org: trivial merge conflict fixes] Signed-off-by: Charan Teja Reddy --- arch/arm64/mm/fault.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index d26e6cd28953..cc221057a11c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -406,10 +406,9 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, +static int __do_page_fault(struct vm_area_struct *vma, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags) { - struct vm_area_struct *vma = find_vma(mm, addr); if (unlikely(!vma)) return VM_FAULT_BADMAP; @@ -456,6 +455,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, vm_fault_t fault, major = 0; unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + struct vm_area_struct *vma = NULL; if (kprobe_page_fault(regs, esr)) return 0; @@ -495,6 +495,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + /* + * let's try a speculative page fault without grabbing the + * mmap_sem. + */ + fault = handle_speculative_fault(mm, addr, mm_flags, &vma); + if (fault != VM_FAULT_RETRY) + goto done; + /* * As per x86, we may deadlock here. However, since the kernel only * validly references user space from well defined areas of the code, @@ -519,7 +527,10 @@ retry: #endif } - fault = __do_page_fault(mm, addr, mm_flags, vm_flags); + if (!vma || !can_reuse_spf_vma(vma, addr)) + vma = find_vma(mm, addr); + + fault = __do_page_fault(vma, addr, mm_flags, vm_flags); major |= fault & VM_FAULT_MAJOR; if (fault & VM_FAULT_RETRY) { @@ -542,11 +553,20 @@ retry: if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; mm_flags |= FAULT_FLAG_TRIED; + + /* + * Do not try to reuse this vma and fetch it + * again since we will release the mmap_sem. + */ + vma = NULL; + goto retry; } } up_read(&mm->mmap_sem); +done: + /* * Handle the "normal" (no error) case first. 
*/ From a9e3a1ab5dee4ecb0ec59426e10ebfcd48a54990 Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Fri, 18 Jan 2019 16:19:08 +0100 Subject: [PATCH 21/23] mm: protect against PTE changes done by dup_mmap() Vinayak Menon and Ganesh Mahendran reported that the following scenario may lead to a thread being blocked due to data corruption:

CPU 1                       CPU 2                       CPU 3
Process 1,                  Process 1,                  Process 1,
Thread A                    Thread B                    Thread C

while (1) {                 while (1) {                 while(1) {
  pthread_mutex_lock(l)       pthread_mutex_lock(l)       fork
  pthread_mutex_unlock(l)     pthread_mutex_unlock(l)
}                           }                           }

In detail, this happens because:

CPU 1                 CPU 2                        CPU 3
fork()
copy_pte_range()
  set PTE rdonly
got to next VMA...
 .                    PTE is seen rdonly           PTE still writable
 .                    thread is writing to page    .
 .                    -> page fault                .
 .                    copy the page                Thread writes to page
 .                    .                            -> no page fault
 .                    update the PTE
 .                    flush TLB for that PTE
flush TLB                                          PTE are now rdonly

So the write done by CPU 3 interferes with the page copy operation done by CPU 2, leading to data corruption. To avoid this, we mark all the VMAs involved in the COW mechanism as changing by calling vm_write_begin(). This ensures that the speculative page fault handler will not try to handle a fault on these pages. The marker is set until the TLB is flushed, ensuring that all the CPUs will then see the PTE as not writable. Once the TLB is flushed, the marker is removed by calling vm_write_end(). The variable last is used to keep track of the last VMA marked, to handle the error path where only some of the VMAs may have been marked. Change-Id: I3fe07109e27d8f77c9b435053567fe5c287703aa Reported-by: Ganesh Mahendran Reported-by: Vinayak Menon Signed-off-by: Laurent Dufour Link: https://www.spinics.net/lists/linux-mm/msg171207.html Patch-mainline: linux-mm@ Fri, 18 Jan 2019 17:24:16 Signed-off-by: Charan Teja Reddy Signed-off-by: Vinayak Menon --- kernel/fork.c | 30 ++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 611df503e361..737fb0f67e21 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -483,7 +483,7 @@ EXPORT_SYMBOL(free_task); static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { - struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct vm_area_struct *mpnt, *tmp, *prev, **pprev, *last = NULL; struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge; @@ -602,8 +602,18 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, rb_parent = &tmp->vm_rb; mm->map_count++; - if (!(tmp->vm_flags & VM_WIPEONFORK)) + if (!(tmp->vm_flags & VM_WIPEONFORK)) { + if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) { + /* + * Mark this VMA as changing to prevent the + * speculative page fault hanlder to process + * it until the TLB are flushed below. + */ + last = mpnt; + vm_write_begin(mpnt); + } retval = copy_page_range(mm, oldmm, mpnt); + } if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); @@ -616,6 +626,22 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); + + if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) { + /* + * Since the TLB has been flush, we can safely unmark the + * copied VMAs and allows the speculative page fault handler to + * process them again. + * Walk back the VMA list from the last marked VMA.
+ */ + for (; last; last = last->vm_prev) { + if (last->vm_flags & VM_DONTCOPY) + continue; + if (!(last->vm_flags & VM_WIPEONFORK)) + vm_write_end(last); + } + } + up_write(&oldmm->mmap_sem); dup_userfaultfd_complete(&uf); fail_uprobe_end: From c41742ec7ca571b523137e2811cefc0307d1539d Mon Sep 17 00:00:00 2001 From: Laurent Dufour Date: Mon, 5 Nov 2018 18:43:01 +0100 Subject: [PATCH 22/23] mm: don't do swap readahead during speculative page fault Vinayak Menon faced a panic because one thread was faulting in a page from swap, while another one was mprotecting a part of the VMA, leading to a VMA split. This raised a panic in swap_vma_readahead() because the VMA's boundaries no longer matched the faulting address. To avoid this, if the page is not found in the swap cache, the speculative page fault is aborted so that a regular page fault can be retried. Change-Id: Ia9d99fb5fde7bd89f38966838d115b6c8c15c9db Signed-off-by: Laurent Dufour Patch-mainline: linux-mm @ Mon, 5 Nov 2018 18:43:01 +0100 [vinmenon@codeaurora.org: a minor 80+ line fix] Signed-off-by: Vinayak Menon --- mm/memory.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index f16ff231fc13..7a5ebc53dc8d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2995,6 +2995,17 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) lru_cache_add_anon(page); swap_readpage(page, true); } + } else if (vmf->flags & FAULT_FLAG_SPECULATIVE) { + /* + * Don't try readahead during a speculative page fault + * as the VMA's boundaries may change in our back. + * If the page is not in the swap cache and synchronous + * read is disabled, fall back to the regular page fault + * mechanism. + */ + delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + ret = VM_FAULT_RETRY; + goto out; } else { page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf); From 8fbac439e0b7a5b875c8e914a5649b4fc9bd7415 Mon Sep 17 00:00:00 2001 From: Patrick Daly Date: Fri, 18 Oct 2019 19:55:49 -0700 Subject: [PATCH 23/23] mm: Fix sleeping while atomic during speculative page fault A speculative page fault may race with a call to free_pagetables. If free_pagetables is called first, *(vmf->pmd) may be empty.

__might_sleep()
__alloc_pages_nodemask()
pte_alloc_one(inline)
__pte_alloc()
pte_alloc_one_map(inline)
alloc_set_pte()
filemap_map_pages()
do_fault_around(inline)
do_read_fault(inline)
do_fault(inline)
handle_pte_fault()
mem_cgroup_exit_user_fault(inline)
__handle_speculative_fault()
do_page_fault()

As filemap_map_pages() holds an rcu_lock(), this triggers a sleeping-while-atomic BUG(). As free_pagetables has already been called, it is also a memory leak. Fix this by skipping to pte_map_lock(), which allows SPF to detect that the VMA has changed, so that a normal page fault is taken instead. Change-Id: I121ca4be99c908656db3a1dc88cfb3b64f01e2fb Signed-off-by: Patrick Daly Signed-off-by: Vinayak Menon --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 7a5ebc53dc8d..b68c3e2356d6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3370,7 +3370,7 @@ static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - if (!pmd_none(*vmf->pmd)) + if (!pmd_none(*vmf->pmd) || (vmf->flags & FAULT_FLAG_SPECULATIVE)) goto map_pte; if (vmf->prealloc_pte) { vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);