android_kernel_xiaomi_sm8350/include/asm-arm/tlb.h
Benjamin Herrenschmidt 5e5419734c add mm argument to pte/pmd/pud/pgd_free
(with Martin Schwidefsky <schwidefsky@de.ibm.com>)

The pgd/pud/pmd/pte page table allocation functions take a mm_struct pointer as
their first argument, but the free functions do not.  This is (1) asymmetrical,
and (2) architectures that do mm-related page table allocation need the mm
argument on the free functions as well.
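
In practice the change is one extra argument on the free side, mirroring the
alloc side.  A minimal sketch of the new calling convention (illustrative only;
pgalloc helper names such as pte_free_kernel() and pmd_free() as they existed
in this kernel generation):

#include <asm/pgalloc.h>

/*
 * Teardown sketch: the free functions now take the mm, like the allocators.
 * Before this patch these calls were pte_free_kernel(pte) and pmd_free(pmd).
 */
static void sketch_free_page_tables(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        pte_free_kernel(mm, pte);       /* mm is now available to mm-aware allocators */
        pmd_free(mm, pmd);
}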

[kamalesh@linux.vnet.ibm.com: i386 fix]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-02-05 09:44:18 -08:00

/*
 *  linux/include/asm-arm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;            /* mm whose mappings are being torn down */
        unsigned int            fullmm;         /* non-zero for a full-mm (e.g. exit) flush */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        tlb->fullmm = full_mm_flush;

        return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (tlb->fullmm)
                flush_tlb_mm(tlb->mm);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}

#define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}

#define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep)         pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp)         pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)          do { } while (0)

#endif /* CONFIG_MMU */
#endif
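
For context, the core mm drives this interface as a fixed sequence: gather,
per-vma start, page removal, per-vma end, finish.  A rough sketch of that
teardown path (modelled loosely on the unmap code of this kernel generation;
simplified, with the page-table walk elided, and sketch_unmap_vma() purely
illustrative):

static void sketch_unmap_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        /* get_cpu_var() inside tlb_gather_mmu() pins us to this CPU's gather */
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0); /* 0 = partial, not full-mm */

        tlb_start_vma(tlb, vma);        /* flushes the cache range unless fullmm */

        /*
         * Page-table walk elided: for each mapped page in [start, end) the
         * walker calls tlb_remove_page(tlb, page); tlb_remove_tlb_entry()
         * is a no-op on ARM because the whole range is flushed below.
         */

        tlb_end_vma(tlb, vma);          /* flushes the TLB for the torn-down range */
        tlb_finish_mmu(tlb, start, end); /* full-mm flush if requested; releases the per-CPU gather */
}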