2a4aca1144
Currently, the various forms of low-level TLB invalidation are all implemented in misc_32.S for 32-bit processors, in a fairly scary mess of #ifdefs, and with interesting duplication such as a whole bunch of code for FSL _tlbie and _tlbia which are no longer used. This moves things around such that _tlbie is now defined in hash_low_32.S and is only used by the 32-bit hash code, and all nohash CPUs use the various _tlbil_* forms that are now moved to a new file, tlb_nohash_low.S. I moved all the definitions for that stuff out of include/asm/tlbflush.h and into mm/mmu_decl.h, as they are really internal mm stuff. The code should have no functional changes. I kept some variants inline for trivial forms on things like 40x and 8xx. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Kumar Gala <galak@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
166 lines
3.7 KiB
ArmAsm
166 lines
3.7 KiB
ArmAsm
/*
|
|
* This file contains low-level functions for performing various
|
|
* types of TLB invalidations on various processors with no hash
|
|
* table.
|
|
*
|
|
* This file implements the following functions for all no-hash
|
|
* processors. Some aren't implemented for some variants. Some
|
|
* are inline in tlbflush.h
|
|
*
|
|
* - tlbil_va
|
|
* - tlbil_pid
|
|
* - tlbil_all
|
|
* - tlbivax_bcast (not yet)
|
|
*
|
|
* Code mostly moved over from misc_32.S
|
|
*
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
*
|
|
* Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
|
|
* Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*
|
|
*/
|
|
|
|
#include <asm/reg.h>
|
|
#include <asm/page.h>
|
|
#include <asm/cputable.h>
|
|
#include <asm/mmu.h>
|
|
#include <asm/ppc_asm.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/processor.h>
|
|
|
|
#if defined(CONFIG_40x)

/*
 * 40x implementation needs only tlbil_va
 */

/*
 * _tlbil_va: invalidate, on the local CPU, the TLB entry that
 * translates one effective address under a given PID.
 *
 * In:   r3 = effective address to invalidate
 *       r4 = PID of the context the mapping belongs to
 * Out:  nothing; r3, r5, r6 and CR0 are clobbered
 */
_GLOBAL(_tlbil_va)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and I don't want to preempt when that happens.
	 */
	mfmsr	r5			/* save MSR so the EE state can be restored */
	mfspr	r6,SPRN_PID		/* save current PID */
	wrteei	0			/* disable external interrupts */
	mtspr	SPRN_PID,r4		/* search under the target context's PID */
	tlbsx.	r3, 0, r3		/* r3 = index of matching entry; CR0[EQ] on hit */
	mtspr	SPRN_PID,r6		/* restore original PID */
	wrtee	r5			/* restore interrupt-enable state */
	bne	1f			/* no matching entry -> nothing to invalidate */
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
	 * clear.  Since 25 is the V bit in the TLB_TAG, loading this value
	 * will invalidate the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync				/* discard any translations already fetched */
1:	blr
|
|
|
|
#elif defined(CONFIG_8xx)

/*
 * Nothing to do for 8xx, everything is inline: the trivial tlbil_*
 * forms for this CPU are kept inline (see the file header comment).
 */
|
|
|
|
#elif defined(CONFIG_44x)

/*
 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
 * of the TLB for everything else.
 */

/*
 * _tlbil_va: invalidate, on the local CPU, the TLB entry that
 * translates one effective address under a given TID.
 *
 * In:   r3 = effective address to invalidate
 *       r4 = TID of the context (inserted into the low byte of MMUCR)
 * Out:  nothing; r3, r4, r5, r6 and CR0 are clobbered
 */
_GLOBAL(_tlbil_va)
	mfspr	r5,SPRN_MMUCR
	rlwimi	r5,r4,0,24,31			/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r4				/* save MSR */
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r4,r6			/* mask EE/CE/ME/DE out of the MSR */
	mtmsr	r6
	mtspr	SPRN_MMUCR,r5			/* TID now selects the search context */
	tlbsx.	r3, 0, r3			/* r3 = matching index; CR0[EQ] on hit */
	mtmsr	r4				/* restore MSR (interrupts back on) */
	bne	1f				/* no match -> done */
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22, is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
1:	blr
|
|
|
|
/*
 * _tlbil_all / _tlbil_pid: flush the TLB on the local CPU.  On 440
 * both entry points perform the same full sweep: every entry from 0
 * up to tlb_44x_hwater (inclusive) is overwritten with an invalid
 * PAGEID word.  Entries above the high watermark are left alone —
 * presumably pinned kernel mappings; TODO confirm against the 44x
 * TLB-miss setup code.
 *
 * Out:  nothing; r3, r4, r5 and CR0 are clobbered
 */
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

	/* r3 doubles as the loop index and the value written: the index
	 * stays small enough that the PAGEID's V bit is clear, so writing
	 * it marks the entry invalid (same trick as in _tlbil_va above). */
1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5			/* loop up to and including the watermark */
	ble	1b

	isync
	blr
|
|
|
|
#elif defined(CONFIG_FSL_BOOKE)
/*
 * FSL BookE implementations.  Currently _pid and _all are the
 * same.  This will change when tlbilx is actually supported and
 * performs invalidate-by-PID.  This change will be driven by
 * mmu_features conditional
 */

/*
 * Flush MMU TLB on the local processor
 *
 * _tlbil_pid / _tlbil_all: flash-invalidate TLB arrays 0-3 by setting
 * the corresponding MMUCSR0 bits, then poll until the hardware clears
 * them to signal completion.
 *
 * Out:  nothing; r3 and CR0 are clobbered
 */
_GLOBAL(_tlbil_pid)
_GLOBAL(_tlbil_all)
#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
	li	r3,(MMUCSR0_TLBFI)@l
	mtspr	SPRN_MMUCSR0, r3	/* kick off the flash invalidate */
1:
	mfspr	r3,SPRN_MMUCSR0		/* re-read status */
	andi.	r3,r3,MMUCSR0_TLBFI@l
	bne	1b			/* bits still set -> still in progress */
	msync
	isync
	blr
|
|
|
|
/*
 * Flush MMU TLB for a particular address, but only on the local processor
 * (no broadcast)
 *
 * In:   r3 = effective address to invalidate
 *       r4 = PID; shifted left 16 into the search-PID field of MAS6
 * Out:  nothing; r3, r4, r10 and CR0 are clobbered
 */
_GLOBAL(_tlbil_va)
	mfmsr	r10
	wrteei	0			/* no interrupts while the MAS regs are live */
	slwi	r4,r4,16		/* position PID for the MAS6 SPID field */
	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
	tlbsx	0,r3			/* search; result lands in the MAS registers */
	mfspr	r4,SPRN_MAS1		/* check valid */
	andis.	r3,r4,MAS1_VALID@h
	beq	1f			/* no valid match -> nothing to do */
	rlwinm	r4,r4,0,1,31		/* clear the MAS1 valid bit */
	mtspr	SPRN_MAS1,r4
	tlbwe				/* write the entry back, now invalid */
	msync
	isync
1:	wrtee	r10			/* restore interrupt-enable state */
	blr
|
|
#else
/* No low-level TLB invalidation variant matched this configuration.
 * Note: this must be #else, not #elif — #elif without a conditional
 * expression is invalid preprocessor syntax and fails to compile. */
#error Unsupported processor type !
#endif
|