android_kernel_xiaomi_sm8350/include/asm-ia64/page.h


#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
 * Pagetable related stuff.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
# ifdef __KERNEL__
#include <asm/intrinsics.h>
#include <asm/types.h>
/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT (61)
#define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
#define RGN_BITS (RGN_BASE(-1))
#define RGN_KERNEL 7 /* Identity mapped region */
#define RGN_UNCACHED 6 /* Identity mapped I/O region */
#define RGN_GATE 5 /* Gate page, Kernel text, etc */
#define RGN_HPAGE 4 /* For Huge TLB pages */
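/*
 * Illustrative values, which follow directly from the macros above:
 * RGN_BASE(RGN_KERNEL) is 7UL << 61 == 0xe000000000000000, so every
 * identity-mapped kernel address has its top three bits set, and
 * RGN_BITS (== 0xe000000000000000) masks exactly those region bits.
 */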
/*
 * PAGE_SHIFT determines the actual kernel page size.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT 13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT 14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT 16
#else
# error Unsupported page size!
#endif
#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
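/*
 * Worked example (assuming the 16KB page size, PAGE_SHIFT == 14):
 * PAGE_SIZE is 0x4000, PAGE_MASK is ~0x3fff, so PAGE_ALIGN(0x4001)
 * rounds up to 0x8000 while PAGE_ALIGN(0x4000) stays at 0x4000.
 */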
#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
# define HPAGE_SHIFT hpage_shift
# define HPAGE_SHIFT_DEFAULT 28 /* see the ia64 SDM for the architecturally supported sizes */
# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK (~(HPAGE_SIZE - 1))
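/*
 * With the default HPAGE_SHIFT_DEFAULT of 28, HPAGE_SIZE works out to
 * 1UL << 28 == 256MB per huge page; hpage_shift can typically be
 * overridden at boot (e.g. via a "hugepagesz=" parameter).
 */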
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef __ASSEMBLY__
# define __pa(x) ((x) - PAGE_OFFSET)
# define __va(x) ((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY__ */
# define STRICT_MM_TYPECHECKS
extern void clear_page (void *page);
extern void copy_page (void *to, void *from);
/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page) \
do { \
	clear_page(addr); \
	flush_dcache_page(page); \
} while (0)
#define copy_user_page(to, from, vaddr, page) \
do { \
	copy_page((to), (from)); \
	flush_dcache_page(page); \
} while (0)
#define alloc_zeroed_user_highpage(vma, vaddr) \
({ \
	struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
	if (page) \
		flush_dcache_page(page); \
	page; \
})
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_VIRTUAL_MEM_MAP
extern int ia64_pfn_valid (unsigned long pfn);
#elif defined(CONFIG_FLATMEM)
# define ia64_pfn_valid(pfn) 1
#endif
#ifdef CONFIG_VIRTUAL_MEM_MAP
extern struct page *vmem_map;
#ifdef CONFIG_DISCONTIGMEM
# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
# define pfn_to_page(pfn) (vmem_map + (pfn))
#endif
#endif
#if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM)
/* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */
#include <asm-generic/memory_model.h>
#endif
#ifdef CONFIG_FLATMEM
# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
#elif defined(CONFIG_DISCONTIGMEM)
extern unsigned long min_low_pfn;
extern unsigned long max_low_pfn;
# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
#endif
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
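/*
 * Example conversion chain (assuming 16KB pages, PAGE_SHIFT == 14):
 * pfn 0x100 corresponds to physical address 0x100 << 14 == 0x400000,
 * and pfn_to_kaddr(0x100) yields the identity-mapped kernel address
 * 0xe000000000400000.
 */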
typedef union ia64_va {
	struct {
		unsigned long off : 61;	/* intra-region offset */
		unsigned long reg : 3;	/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;
/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
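/*
 * Example (values follow from the bitfield layout above): __pa() simply
 * clears the region bits, so __pa(0xe000000000400000) == 0x400000, while
 * __va(0x400000) sets all three region bits (region 7, RGN_KERNEL) and
 * returns 0xe000000000400000.  REGION_NUMBER() of that address is 7.
 */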
#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
			      | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
# define is_hugepage_only_range(mm, addr, len) \
	(REGION_NUMBER(addr) == RGN_HPAGE || \
	 REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
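/*
 * With the defaults above (HPAGE_SHIFT 28, assuming 16KB base pages),
 * HUGETLB_PAGE_ORDER is 14, i.e. one huge page spans 2^14 base pages.
 * htlbpage_to_page() keeps the region number of a huge-page address and
 * scales the intra-region offset down from huge-page to base-page units.
 */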
extern unsigned int hpage_shift;
#endif
static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	/*
	 * ia64_getf_exp() returns the biased exponent field of the
	 * register-format floating-point value, i.e. it computes
	 * floor(log2(size - 1)) + 0xffff.  Removing the 0xffff bias
	 * and PAGE_SHIFT, then adding 1, yields ceil(log2(size)) in
	 * units of pages.
	 */
	order = ia64_getf_exp(d);
	order = order - PAGE_SHIFT - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
#endif /* !__ASSEMBLY__ */
#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
#ifdef CONFIG_PGTABLE_4
typedef struct { unsigned long pud; } pud_t;
#endif
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
# define pte_val(x) ((x).pte)
# define pmd_val(x) ((x).pmd)
#ifdef CONFIG_PGTABLE_4
# define pud_val(x) ((x).pud)
#endif
# define pgd_val(x) ((x).pgd)
# define pgprot_val(x) ((x).pgprot)
# define __pte(x) ((pte_t) { (x) } )
# define __pgprot(x) ((pgprot_t) { (x) } )
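/*
 * Under STRICT_MM_TYPECHECKS the struct wrappers turn accidental mixups
 * into compile errors, e.g. (illustrative):
 *
 *	pte_t pte = __pte(0x123);		// OK
 *	unsigned long raw = pte_val(pte);	// OK
 *	pte_t bad = 0x123;			// rejected: int -> struct
 */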
#else /* !STRICT_MM_TYPECHECKS */
/*
 * .. while these make it easier on the compiler
 */
# ifndef __ASSEMBLY__
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;
# endif
# define pte_val(x) (x)
# define pmd_val(x) (x)
# define pgd_val(x) (x)
# define pgprot_val(x) (x)
# define __pte(x) (x)
# define __pgd(x) (x)
# define __pgprot(x) (x)
#endif /* !STRICT_MM_TYPECHECKS */
#define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
				 (((current->personality & READ_IMPLIES_EXEC) != 0) \
				  ? VM_EXEC : 0))
# endif /* __KERNEL__ */
#endif /* _ASM_IA64_PAGE_H */