android_kernel_xiaomi_sm8350/include/asm-sh/cpu-sh4/cacheflush.h
commit f3c2575818 ("sh: Calculate shm alignment at runtime.")
Author: Paul Mundt <lethal@linux-sh.org>
Date:   2006-09-27 18:36:17 +09:00

Set the SHM alignment at runtime, based on the probed cache descriptor.
Optimize get_unmapped_area() to only colour-align shared mappings.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
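To make the commit description above concrete, here is a minimal, self-contained
user-space sketch of the idea: derive the SHM alignment mask at runtime from the
probed D-cache geometry, and colour-align only shared mappings. The names
(dcache_way_size, shm_align_mask, colour_align), the 16 KiB way size, and the
PAGE_SHIFT of 12 are illustrative assumptions for this sketch, not the code
added by the commit.

#include <stdio.h>

/* Illustrative stand-in for a value probed from the CPU's cache descriptor
 * at boot: the size of one D-cache way (sets * line size). Assumed 16 KiB. */
static unsigned long dcache_way_size = 16 * 1024;
static unsigned long page_size       = 4096;   /* assumes PAGE_SHIFT == 12 */

/* Runtime SHM alignment mask: shared mappings are aligned to the larger of
 * one cache way and one page, so equal file offsets get equal cache colours. */
static unsigned long shm_align_mask;

static void init_shm_align(void)
{
	unsigned long align = dcache_way_size > page_size ?
			      dcache_way_size : page_size;
	shm_align_mask = align - 1;
}

/* Colour-align an address the way a get_unmapped_area() implementation might,
 * but only for shared mappings; private mappings keep plain page alignment. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff,
				  int shared)
{
	if (!shared)
		return (addr + page_size - 1) & ~(page_size - 1);
	return ((addr + shm_align_mask) & ~shm_align_mask) +
	       ((pgoff << 12) & shm_align_mask);
}

int main(void)
{
	init_shm_align();
	printf("shm_align_mask = 0x%lx\n", shm_align_mask);
	printf("shared  mapping hint 0x40001234 -> 0x%lx\n",
	       colour_align(0x40001234UL, 0, 1));
	printf("private mapping hint 0x40001234 -> 0x%lx\n",
	       colour_align(0x40001234UL, 0, 0));
	return 0;
}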

/*
 * include/asm-sh/cpu-sh4/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
#define __ASM_CPU_SH4_CACHEFLUSH_H

/*
 * Caches are broken on SH-4 (unless we use write-through
 * caching; in which case they're only semi-broken),
 * so we need them.
 */
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn);
void flush_dcache_page(struct page *pg);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_sigtramp(unsigned long addr);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len);
#define flush_icache_page(vma,pg) do { } while (0)
/* Initialization of P3 area for copy_user_page */
void p3_cache_init(void);
#define PG_mapped PG_arch_1

#ifdef CONFIG_MMU
extern int remap_area_pages(unsigned long addr, unsigned long phys_addr,
			    unsigned long size, unsigned long flags);
#else /* CONFIG_MMU */
static inline int remap_area_pages(unsigned long addr, unsigned long phys_addr,
				   unsigned long size, unsigned long flags)
{
return 0;
}
#endif /* CONFIG_MMU */
#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
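
For background on why hooks like flush_icache_range() and
flush_icache_user_range() exist, the following user-space sketch shows the
ordering code follows after writing instructions that will later be executed:
copy the bytes, write the D-cache back so they reach memory, then invalidate
the I-cache so stale lines are refetched. The stub functions are illustrative
stand-ins and only print what the real SH-4 routines declared above would do
to the caches.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the cache maintenance declared in this header;
 * they only report what the real SH-4 routines would do. */
static void writeback_dcache_range(void *start, unsigned long len)
{
	printf("write back D-cache  %p .. +%lu\n", start, len);
}

static void invalidate_icache_range(void *start, unsigned long len)
{
	printf("invalidate I-cache  %p .. +%lu\n", start, len);
}

/* Typical ordering when code is patched in memory (e.g. a debugger planting
 * a breakpoint): copy the bytes, push them out of the D-cache, then drop any
 * stale I-cache lines so the CPU refetches the new instructions. */
static void patch_instructions(void *dst, const void *src, unsigned long len)
{
	memcpy(dst, src, len);
	writeback_dcache_range(dst, len);
	invalidate_icache_range(dst, len);
}

int main(void)
{
	unsigned char code[16] = { 0 };
	static const unsigned char new_insn[2] = { 0x09, 0x00 }; /* placeholder bytes */

	patch_instructions(code, new_insn, sizeof(new_insn));
	return 0;
}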