26ff6c11ef
Cleanup of the page table allocators, using the generic folded PMD and PUD helpers. TLB flushing operations are moved to a more sensible spot. The page fault handler is also optimized slightly: we no longer waste cycles disabling IRQs around the ITLB flush of the faulting page, since we are already under CLI protection from the initial exception handler.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
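The ITLB part of this can be pictured with a short before/after sketch. This is illustrative only: the helpers local_irq_save()/local_irq_restore(), __flush_tlb_page() and get_asid() exist in the sh tree, but the exact fault-path function is not reproduced here.

	/* Before: IRQ state saved and restored around the ITLB flush. */
	unsigned long flags;

	local_irq_save(flags);
	__flush_tlb_page(get_asid(), address & PAGE_MASK);
	local_irq_restore(flags);

	/*
	 * After: the initial exception handler has already executed CLI,
	 * so interrupts are known to be off here and the save/restore
	 * pair is pure overhead.
	 */
	__flush_tlb_page(get_asid(), address & PAGE_MASK);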
82 lines
2.9 KiB
C
/*
 * include/asm-sh/cpu-sh3/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH3_CACHEFLUSH_H
#define __ASM_CPU_SH3_CACHEFLUSH_H

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *
 *  Caches are indexed (effectively) by physical address on SH-3, so
 *  we don't need these flush operations.
 */
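/*
 * Illustrative sketch of a typical flush_icache_range() caller: code
 * that writes instructions into memory makes them visible to
 * instruction fetch before jumping to them, e.g.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */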
#if defined(CONFIG_SH7705_CACHE_32KB)

/* SH7705 is an SH3 processor with a 32KB cache. This has alias issues
 * like the SH4. Unlike the SH4, however, it is a unified cache, so we
 * need to do some work in mmap when 'exec'ing a new binary.
 */
/* 32KB cache with 4KB pages: check bit 12 for aliases */
#define CACHE_ALIAS 0x00001000
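/*
 * Sketch of the alias test this mask encodes: two virtual mappings of
 * the same physical page can land in different cache lines whenever
 *
 *	(vaddr1 ^ vaddr2) & CACHE_ALIAS
 *
 * is non-zero, so the old mapping must be flushed before the page is
 * touched through the new one.
 */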
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);

#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)

/* SH3 has a unified cache, so no special action is needed here */
#define flush_cache_sigtramp(vaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

#define p3_cache_init() do { } while (0)

#define PG_mapped PG_arch_1

/* We provide our own get_unmapped_area to avoid cache alias issues */
#define HAVE_ARCH_UNMAPPED_AREA
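/*
 * Sketch (hypothetical, for illustration only; the real helper lives
 * outside this header): an arch get_unmapped_area() avoids aliases by
 * colouring candidate addresses so that the alias bit of a shared
 * mapping matches its file offset, roughly:
 *
 *	addr = (addr + 2 * CACHE_ALIAS - 1) & ~(2 * CACHE_ALIAS - 1);
 *	addr += (pgoff << PAGE_SHIFT) & CACHE_ALIAS;
 */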
#else

#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_sigtramp(vaddr) do { } while (0)

#define p3_cache_init() do { } while (0)

#endif

#endif /* __ASM_CPU_SH3_CACHEFLUSH_H */