android_kernel_xiaomi_sm8350/arch/arm/mm/proc-arm1020e.S
/*
* linux/arch/arm/mm/proc-arm1020e.S: MMU functions for ARM1020E
*
* Copyright (C) 2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* hacked for non-paged-MM by Hyok S. Choi, 2003.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low-level assembly routines for performing cache and TLB
* functions on the ARM1020E.
*
* CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/config.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
/*
* This is the maximum size of an area which will be invalidated
* using the single invalidate entry instructions. Anything larger
* than this, and we go for the whole cache.
*
* This value should be chosen so that we always use the cheapest
* alternative.
*/
#define MAX_AREA_SIZE 32768
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 16
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 32768
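/*
* The defines above describe a 32 KiB data cache: 16 segments of 64
* entries of 32 bytes each. A rough C sketch (hypothetical helper
* names) of the cutoff decision the range routines below make:
*
*	static void flush_range(unsigned long start, unsigned long end)
*	{
*		if (end - start >= CACHE_DLIMIT) {	// 16 * 64 * 32 = 32768
*			flush_whole_cache();		// one walk of every set/index
*			return;
*		}
*		for (; start < end; start += CACHE_DLINESIZE)
*			clean_and_invalidate_dline(start); // one MCR per line
*	}
*/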
.text
/*
* cpu_arm1020e_proc_init()
*/
ENTRY(cpu_arm1020e_proc_init)
mov pc, lr
/*
* cpu_arm1020e_proc_fin()
*/
ENTRY(cpu_arm1020e_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm1020e_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm1020e_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
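/*
* A C-level sketch (hypothetical helper names) of the sequence above:
* caches and TLBs are invalidated (not cleaned), the write buffer is
* drained, then the MMU, caches and write buffer are switched off
* before jumping, so 'loc' must remain valid once translation is off.
*
*	void cpu_reset(unsigned long loc)
*	{
*		unsigned long cr;
*
*		inval_icache_dcache();		// MCR p15,0,0,c7,c7,0
*		drain_write_buffer();		// MCR p15,0,0,c7,c10,4
*		inval_tlbs();			// MCR p15,0,0,c8,c7,0
*		cr = read_cp15_control();
*		cr &= ~0x000f;			// clear W, C, A, M
*		cr &= ~0x1100;			// clear I and S
*		write_cp15_control(cr);		// MMU and caches now off
*		((void (*)(void))loc)();	// branch to the "reset vector"
*	}
*/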
/*
* cpu_arm1020e_do_idle()
*/
.align 5
ENTRY(cpu_arm1020e_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr
/* ================================= CACHE ================================ */
.align 5
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(arm1020e_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm1020e_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 16 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 15 to 0
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
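/*
* The loop above addresses the D-cache by set/index: the entry number
* lives in bits [31:26] of the MCR operand and the segment number in
* bits [8:5]. The same traversal in C (hypothetical helper name):
*
*	int seg, entry;
*
*	for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
*		for (entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
*			// MCR p15,0,Rd,c7,c14,2: clean+invalidate by index
*			clean_inval_dindex((entry << 26) | (seg << 5));
*/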
/*
* flush_user_cache_range(start, end, flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags for this space
*/
ENTRY(arm1020e_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
#ifndef CONFIG_CPU_DCACHE_DISABLE
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
tst r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
#endif
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_coherent_user_range)
mov ip, #0
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
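/*
* In C terms (hypothetical helper names): each line of the range is
* cleaned out of the D-cache so memory holds the new instructions, the
* stale I-cache line is invalidated, and finally the write buffer is
* drained so the stores are visible before the next instruction fetch.
*
*	void coherent_range(unsigned long start, unsigned long end)
*	{
*		start &= ~(CACHE_DLINESIZE - 1);	// round down to a line
*		for (; start < end; start += CACHE_DLINESIZE) {
*			clean_dline(start);		// MCR p15,0,r,c7,c10,1
*			inval_iline(start);		// MCR p15,0,r,c7,c5,1
*		}
*		drain_write_buffer();			// MCR p15,0,0,c7,c10,4
*	}
*/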
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache.
*
* - page - page aligned address
*/
ENTRY(arm1020e_flush_kern_dcache_page)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1020e_dma_inv_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
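/*
* Invalidation discards data, so the two mcrne instructions above
* first clean (write back) any partially covered boundary line, which
* may still hold live data belonging to a neighbouring buffer. As a
* C sketch (hypothetical helper names):
*
*	void dma_inv_range(unsigned long start, unsigned long end)
*	{
*		if (start & (CACHE_DLINESIZE - 1))
*			clean_dline(start);	// save the partial head line
*		if (end & (CACHE_DLINESIZE - 1))
*			clean_dline(end);	// save the partial tail line
*		start &= ~(CACHE_DLINESIZE - 1);
*		for (; start < end; start += CACHE_DLINESIZE)
*			inval_dline(start);	// MCR p15,0,r,c7,c6,1
*		drain_write_buffer();
*	}
*/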
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm1020e_dma_clean_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm1020e_dma_flush_range)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
.long arm1020e_flush_user_cache_range
.long arm1020e_coherent_kern_range
.long arm1020e_coherent_user_range
.long arm1020e_flush_kern_dcache_page
.long arm1020e_dma_inv_range
.long arm1020e_dma_clean_range
.long arm1020e_dma_flush_range
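/*
* This table is consumed through a struct of function pointers, so the
* entries must stay in the same order as the fields of struct
* cpu_cache_fns in asm/cacheflush.h. A sketch of the 2.6-era layout
* this table is assumed to pair with (check this tree's header):
*
*	struct cpu_cache_fns {
*		void (*flush_kern_all)(void);
*		void (*flush_user_all)(void);
*		void (*flush_user_range)(unsigned long, unsigned long,
*					 unsigned int flags);
*		void (*coherent_kern_range)(unsigned long, unsigned long);
*		void (*coherent_user_range)(unsigned long, unsigned long);
*		void (*flush_kern_dcache_page)(void *);
*		void (*dma_inv_range)(unsigned long, unsigned long);
*		void (*dma_clean_range)(unsigned long, unsigned long);
*		void (*dma_flush_range)(unsigned long, unsigned long);
*	};
*/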
.align 5
ENTRY(cpu_arm1020e_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
mov ip, #0
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm1020e_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_arm1020e_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r3, c7, c10, 4 @ drain WB
mov r1, #0xF @ 16 segments
1: mov r3, #0x3F @ 64 entries
2: mov ip, r3, LSL #26 @ shift up entry
orr ip, ip, r1, LSL #5 @ shift in/up index
mcr p15, 0, ip, c7, c14, 2 @ Clean & Inval DCache entry
mov ip, #0
subs r3, r3, #1
cmp r3, #0
bge 2b @ entries 3F to 0
subs r1, r1, #1
cmp r1, #0
bge 1b @ segments 15 to 0
#endif
mov r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
mcr p15, 0, r1, c7, c5, 0 @ invalidate I cache
#endif
mcr p15, 0, r1, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
#endif
mov pc, lr
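/*
* The ARM1020E caches are virtually indexed and virtually tagged, so
* the old translation's cache contents cannot survive a page table
* switch. The sequence above, as a C sketch (hypothetical names):
*
*	void switch_mm(unsigned long pgd_phys)
*	{
*		drain_write_buffer();		// MCR p15,0,0,c7,c10,4
*		clean_inval_whole_dcache();	// set/index loop, c7,c14,2
*		inval_whole_icache();		// MCR p15,0,0,c7,c5,0
*		drain_write_buffer();
*		set_ttb(pgd_phys);		// MCR p15,0,pgd,c2,c0,0
*		inval_tlbs();			// MCR p15,0,0,c8,c7,0
*	}
*/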
/*
* cpu_arm1020e_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm1020e_set_pte)
#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #PTE_SMALL_AP_MASK
bic r2, r2, #PTE_TYPE_MASK
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
eor r3, r1, #0x0a @ C & small page?
tst r3, #0x0b
biceq r2, r2, #4
#endif
str r2, [r0] @ hardware version
mov r0, r0 @ nop
#ifndef CONFIG_CPU_DCACHE_DISABLE
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
#endif /* CONFIG_MMU */
mov pc, lr
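/*
* Each Linux PTE is shadowed by a hardware PTE 2 KiB below it in the
* page table page: the code above stores the Linux view, then derives
* and stores the hardware view. The eor only inverts bits for the tst
* tests; it does not change the result. The same translation as a C
* sketch (hypothetical names, AP constants from asm/pgtable-hwdef.h,
* the CONFIG_CPU_DCACHE_WRITETHROUGH special case omitted):
*
*	void set_pte(pte_t *ptep, pte_t pte)
*	{
*		unsigned long lx = pte_val(pte), hw;
*
*		*ptep = pte;				// Linux version
*		hw = (lx & ~(PTE_SMALL_AP_MASK | PTE_TYPE_MASK))
*			| PTE_TYPE_SMALL;		// always a 4K small page
*		if (lx & L_PTE_USER)
*			hw |= PTE_SMALL_AP_URO_SRW;	// user RO, kernel RW
*		if ((lx & (L_PTE_WRITE | L_PTE_DIRTY)) ==
*		    (L_PTE_WRITE | L_PTE_DIRTY))
*			hw |= PTE_SMALL_AP_UNO_SRW;	// writeable once dirty
*		if ((lx & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
*		    (L_PTE_PRESENT | L_PTE_YOUNG))
*			hw = 0;				// not present: fault
*		((unsigned long *)ptep)[-2048 / 4] = hw; // hardware version
*		clean_dline((unsigned long)ptep - 2048); // flush it to RAM
*	}
*/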
__INIT
.type __arm1020e_setup, #function
__arm1020e_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
mrc p15, 0, r0, c1, c0 @ get control register v4
ldr r5, arm1020e_cr1_clear
bic r0, r0, r5
ldr r5, arm1020e_cr1_set
orr r0, r0, r5
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .R.. .... .... ....
#endif
mov pc, lr
.size __arm1020e_setup, . - __arm1020e_setup
/*
* Control register masks, in the '.RVI ZFRS BLDP WCAM' bit legend.
* The value set below is:
* .RVI ZFRS BLDP WCAM
* .011 1001 ..11 0101
* R (bit 14, round-robin cache replacement) is only ORed in when
* CONFIG_CPU_CACHE_ROUND_ROBIN is enabled.
*/
.type arm1020e_cr1_clear, #object
.type arm1020e_cr1_set, #object
arm1020e_cr1_clear:
.word 0x5f3f
arm1020e_cr1_set:
.word 0x3935
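/*
* __arm1020e_setup leaves the adjusted control register value in r0;
* the startup code that called it writes the value back to CP15 c1.
* In C terms:
*
*	unsigned long cr = read_cp15_control();	// MRC p15,0,r0,c1,c0
*
*	cr &= ~0x5f3f;			// clear .R.I ZFRS ..DP WCAM
*	cr |= 0x3935;			// set   ..VI Z..S ..DP .C.M
*	#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
*	cr |= 0x4000;			// R: round-robin replacement
*	#endif
*	return cr;
*/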
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm1020e_processor_functions, #object
arm1020e_processor_functions:
.word v4t_early_abort
.word cpu_arm1020e_proc_init
.word cpu_arm1020e_proc_fin
.word cpu_arm1020e_reset
.word cpu_arm1020e_do_idle
.word cpu_arm1020e_dcache_clean_area
.word cpu_arm1020e_switch_mm
.word cpu_arm1020e_set_pte
.size arm1020e_processor_functions, . - arm1020e_processor_functions
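/*
* The words above must mirror the fields of struct processor, the
* per-CPU dispatch structure; a sketch of the 2.6-era layout this
* table is assumed to match:
*
*	struct processor {
*		void (*_data_abort)(unsigned long pc);	// v4t_early_abort
*		void (*_proc_init)(void);
*		void (*_proc_fin)(void);
*		void (*reset)(unsigned long addr);
*		int  (*_do_idle)(void);
*		void (*dcache_clean_area)(void *addr, int size);
*		void (*switch_mm)(unsigned long pgd_phys,
*				  struct mm_struct *mm);
*		void (*set_pte)(pte_t *ptep, pte_t pte);
*	};
*/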
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv5te"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm1020e_name, #object
cpu_arm1020e_name:
.asciz "ARM1020E"
.size cpu_arm1020e_name, . - cpu_arm1020e_name
.align
.section ".proc.info.init", #alloc, #execinstr
.type __arm1020e_proc_info,#object
__arm1020e_proc_info:
.long 0x4105a200 @ ARM 1020TE (Architecture v5TE)
.long 0xff0ffff0
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __arm1020e_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
.long cpu_arm1020e_name
.long arm1020e_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1020e_cache_fns
.size __arm1020e_proc_info, . - __arm1020e_proc_info
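/*
* The record above is matched against the CPU ID register at boot, so
* its layout must follow struct proc_info_list from asm/procinfo.h.
* A sketch of the 2.6-era structure this entry is assumed to fill in:
*
*	struct proc_info_list {
*		unsigned int		cpu_val;	// 0x4105a200
*		unsigned int		cpu_mask;	// 0xff0ffff0
*		unsigned long		__cpu_mm_mmu_flags; // PMD_* bits
*		unsigned long		__cpu_flush;	// b __arm1020e_setup
*		const char		*arch_name;	// "armv5te"
*		const char		*elf_name;	// "v5"
*		unsigned int		elf_hwcap;	// HWCAP_* bits
*		const char		*cpu_name;	// "ARM1020E"
*		struct processor	*proc;
*		struct cpu_tlb_fns	*tlb;
*		struct cpu_user_fns	*user;
*		struct cpu_cache_fns	*cache;
*	};
*/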