ANDROID: mm: remove sequence counting when mmap_lock is not exclusively owned

In a number of cases vm_write_{begin|end} is called while mmap_lock is
not owned exclusively. This is unnecessary and can affect correctness of
the sequence counting protecting speculative page fault handlers. Remove
extra calls.

Bug: 257443051
Change-Id: I1278638a0794448e22fbdab5601212b3b2eaebdc
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Git-commit: bfdcf47ca34dc3b7b63ca16b0a1856e57c57ee47
Git-repo: https://android.googlesource.com/kernel/common/
[quic_c_spathi@quicinc.com: resolve trivial merge conflicts]
Signed-off-by: Srinivasarao Pathipati <quic_c_spathi@quicinc.com>
This commit is contained in:
Suren Baghdasaryan 2022-11-15 10:38:43 -08:00 committed by Gerrit - the friendly Code Review server
parent 365a5b7af5
commit ad939deb18
3 changed files with 0 additions and 10 deletions

View File

@@ -500,11 +500,9 @@ static void madvise_cold_page_range(struct mmu_gather *tlb,
.target_task = task,
};
-vm_write_begin(vma);
tlb_start_vma(tlb, vma);
walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
tlb_end_vma(tlb, vma);
-vm_write_end(vma);
}
static long madvise_cold(struct task_struct *task,
@@ -538,11 +536,9 @@ static void madvise_pageout_page_range(struct mmu_gather *tlb,
.target_task = task,
};
-vm_write_begin(vma);
tlb_start_vma(tlb, vma);
walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
tlb_end_vma(tlb, vma);
-vm_write_end(vma);
}
static inline bool can_do_pageout(struct vm_area_struct *vma)
@@ -745,12 +741,10 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(&range);
-vm_write_begin(vma);
tlb_start_vma(&tlb, vma);
walk_page_range(vma->vm_mm, range.start, range.end,
&madvise_free_walk_ops, &tlb);
tlb_end_vma(&tlb, vma);
-vm_write_end(vma);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, range.start, range.end);

View File

@@ -1292,7 +1292,6 @@ void unmap_page_range(struct mmu_gather *tlb,
unsigned long next;
BUG_ON(addr >= end);
-vm_write_begin(vma);
tlb_start_vma(tlb, vma);
pgd = pgd_offset(vma->vm_mm, addr);
do {
@@ -1302,7 +1301,6 @@ void unmap_page_range(struct mmu_gather *tlb,
next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
} while (pgd++, addr = next, addr != end);
tlb_end_vma(tlb, vma);
-vm_write_end(vma);
}

View File

@@ -599,11 +599,9 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
{
int nr_updated;
-vm_write_begin(vma);
nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
if (nr_updated)
count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
-vm_write_end(vma);
return nr_updated;
}