716777db72
This patch adds a pass-through case when ioremapping P4 addresses. Addresses passed to ioremap() should be physical addresses, so the best option is usually to convert the virtual address to a physical address before calling ioremap. This will give you a virtual address in P2 which matches the physical address, and this works well for most internal hardware blocks on the SuperH architecture. However, some hardware blocks must be accessed through P4. Converting the P4 address to a physical address and then back to a P2 address does not work. One example of this is the sh7722 TMU block: it must be accessed through P4. Without this patch, P4 addresses will be mapped using PTEs, which requires the page allocator to be up and running. Signed-off-by: Magnus Damm <damm@igel.co.jp> Signed-off-by: Paul Mundt <lethal@linux-sh.org>
65 lines
1.9 KiB
C
65 lines
1.9 KiB
C
/*
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
* for more details.
|
|
*
|
|
* Copyright (C) 1999 by Kaz Kojima
|
|
*
|
|
* Defitions for the address spaces of the SH CPUs.
|
|
*/
|
|
#ifndef __ASM_SH_ADDRSPACE_H
|
|
#define __ASM_SH_ADDRSPACE_H
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <cpu/addrspace.h>
|
|
|
|
/* If this CPU supports segmentation, hook up the helpers */
|
|
#ifdef P1SEG
|
|
|
|
/*
|
|
[ P0/U0 (virtual) ] 0x00000000 <------ User space
|
|
[ P1 (fixed) cached ] 0x80000000 <------ Kernel space
|
|
[ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access
|
|
[ P3 (virtual) cached] 0xC0000000 <------ vmalloced area
|
|
[ P4 control ] 0xE0000000
|
|
*/
|
|
|
|
/* Returns the privileged segment base of a given address */
|
|
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
|
|
|
|
/* Returns the physical address of a PnSEG (n=1,2) address */
|
|
#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
|
|
|
|
#ifdef CONFIG_29BIT
|
|
/*
|
|
* Map an address to a certain privileged segment
|
|
*/
|
|
#define P1SEGADDR(a) \
|
|
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P1SEG))
|
|
#define P2SEGADDR(a) \
|
|
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P2SEG))
|
|
#define P3SEGADDR(a) \
|
|
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
|
|
#define P4SEGADDR(a) \
|
|
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
|
|
#endif /* 29BIT */
|
|
#endif /* P1SEG */
|
|
|
|
/* Check if an address can be reached in 29 bits */
|
|
#define IS_29BIT(a) (((unsigned long)(a)) < 0x20000000)
|
|
|
|
#ifdef CONFIG_SH_STORE_QUEUES
|
|
/*
|
|
* This is a special case for the SH-4 store queues, as pages for this
|
|
* space still need to be faulted in before it's possible to flush the
|
|
* store queue cache for writeout to the remapped region.
|
|
*/
|
|
#define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000)
|
|
#else
|
|
#define P3_ADDR_MAX P4SEG
|
|
#endif
|
|
|
|
#endif /* __KERNEL__ */
|
|
#endif /* __ASM_SH_ADDRSPACE_H */
|