android_kernel_cmhtcleo/arch/arm/mm/proc-arm925.S
/*
* linux/arch/arm/mm/proc-arm925.S: MMU functions for ARM925
*
* Copyright (C) 1999,2000 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Copyright (C) 2002 RidgeRun, Inc.
* Copyright (C) 2002-2003 MontaVista Software, Inc.
*
* Update for Linux-2.6 and cache flush improvements
* Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
*
* hacked for non-paged-MM by Hyok S. Choi, 2004.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
* These are the low level assembler for performing cache and TLB
* functions on the arm925.
*
* CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
*
* Some additional notes based on deciphering the TI TRM on OMAP-5910:
*
* NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
* entry mode" must be 0 to flush the entries in both segments
* at once. This is the default value. See TRM 2-20 and 2-24 for
* more information.
*
* NOTE2: Default is the "D-cache clean and flush entry mode". It looks
* like the "Transparent mode" must be on for partial cache flushes
* to work in this mode. This mode only works with 16-bit external
* memory. See TRM 2-24 for more information.
*
* NOTE3: Write-back cache flushing seems to be flaky with devices using
* direct memory access, such as USB OHCI. The workaround is to use
* write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
* the default for OMAP-1510).
*/
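/*
 * Illustrative sketch only; nothing below is used by this file. The
 * TI925T configuration register lives in cp15 c15 and is written by
 * __arm925_setup with "mcr p15, 0, r0, c15, c1, 0". Assuming it also
 * reads back through the matching mrc encoding (the TRM would need to
 * confirm that), toggling the transparent-mode bit (bit 1, as used in
 * __arm925_setup) could look like:
 *
 * mrc p15, 0, r0, c15, c1, 0 @ read TI config register (assumed)
 * orr r0, r0, #1 << 1 @ set transparent mode
 * mcr p15, 0, r0, c15, c1, 0 @ write it back
 */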
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 16
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 2
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 256
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*/
#define CACHE_DLIMIT 8192
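/*
 * 2 segments x 256 entries x 16 bytes per line = 8 KiB, i.e. the
 * entire D-cache, so CACHE_DLIMIT equals the cache size.
 */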
.text
/*
 * cpu_arm925_proc_init()
 *
 * Nothing to do; the ARM925 needs no extra per-CPU initialisation.
 */
ENTRY(cpu_arm925_proc_init)
mov pc, lr
/*
 * cpu_arm925_proc_fin()
 *
 * Prepare the processor for shutdown: mask IRQ/FIQ, flush the
 * caches, then disable them in the control register.
 */
ENTRY(cpu_arm925_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
bl arm925_flush_kern_cache_all
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_arm925_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
orr ip, ip, #0x00fe0000
orr ip, ip, #0x0000ce00
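@ ip = 0xfffece00; on the OMAP-1510/5910 this file targets, offset
@ 0x10 is the reset control register (ARM_RSTCT1 at 0xfffece10),
@ and the halfword write of 1 asserts the software reset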
mov r4, #1
strh r4, [ip, #0x10]
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_arm925_do_idle()
*
* Called with IRQs disabled
*/
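/*
 * The write buffer is drained and the I-cache disabled (control
 * register bit 12) for the duration of the wait-for-interrupt, then
 * re-enabled on wakeup. The .align 10 keeps the routine within a
 * single 1 KB aligned block, presumably so the whole sequence is
 * fetched from one region before the I-cache goes off.
 */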
.align 10
ENTRY(cpu_arm925_do_idle)
mov r0, #0
mrc p15, 0, r1, c1, c0, 0 @ Read control register
mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
bic r2, r1, #1 << 12 @ clear I-cache enable (bit 12)
mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
mov pc, lr
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular
* address space.
*/
ENTRY(arm925_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(arm925_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* Flush entries in both segments at once, see NOTE1 above */
mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 4
bcs 2b @ entries 255 to 0
#endif
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_user_cache_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address range.
*
* - start - start address (inclusive)
* - end - end address (exclusive)
* - flags - vm_flags describing address space
*/
ENTRY(arm925_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bgt __flush_whole_cache
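@ both variants of the loop below are unrolled twice: two cache
@ lines are handled per iteration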
1: tst r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
#else
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
#endif
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_coherent_kern_range)
/* FALLTHROUGH */
/*
* coherent_user_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - page - page aligned address
*/
ENTRY(arm925_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean partial line at start
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean partial line at end
#endif
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* (same as v4wb)
*/
ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(arm925_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
#else
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
#endif
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
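/*
 * Jump table used by the generic cache handling code; the entry
 * order must match struct cpu_cache_fns in asm/cacheflush.h.
 */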
ENTRY(arm925_cache_fns)
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
.long arm925_flush_user_cache_range
.long arm925_coherent_kern_range
.long arm925_coherent_user_range
.long arm925_flush_kern_dcache_page
.long arm925_dma_inv_range
.long arm925_dma_clean_range
.long arm925_dma_flush_range
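/*
 * cpu_arm925_dcache_clean_area(addr, size)
 *
 * Clean the D-cache over the given region.
 *
 * - addr - virtual start address (r0)
 * - size - region size in bytes (r1)
 */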
ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_arm925_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
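/*
 * The caches are virtually indexed and virtually tagged, so they
 * must be cleaned and invalidated before the translation base is
 * changed.
 */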
.align 5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
/* Flush entries in both segments at once, see NOTE1 above */
mov r3, #(CACHE_DENTRIES - 1) << 4 @ 256 entries in segment
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 4
bcs 2b @ entries 255 to 0
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mov pc, lr
/*
* cpu_arm925_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0 @ nop
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#endif
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif /* CONFIG_MMU */
mov pc, lr
__INIT
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
orr r0, r0, #1 << 7 @ disable I-cache streaming
#endif
/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
orr r0, r0, #1 << 1 @ transparent mode on
mcr p15, 0, r0, c15, c1, 0 @ write TI config register
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mov r0, #4 @ disable write-back on caches explicitly
mcr p15, 7, r0, c15, c0, 0
#endif
adr r5, arm925_crval
ldmia r5, {r5, r6} @ r5 = bits to clear, r6 = bits to set
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
orr r0, r0, #0x4000 @ .1.. .... .... ....
#endif
mov pc, lr
.size __arm925_setup, . - __arm925_setup
/*
 * Control register settings (the pattern is the mmuset value 0x313d;
 * dots mark bits outside the clear mask, which are left untouched):
 *
 *  R
 * .RVI ZFRS BLDP WCAM
 * .011 0001 ..11 1101
 */
.type arm925_crval, #object
arm925_crval:
crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
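/*
 * The crval macro (proc-macros.S) emits the clear mask followed by
 * mmuset on CONFIG_MMU builds, or by ucset on MMU-less builds.
 */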
__INITDATA
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
.type arm925_processor_functions, #object
arm925_processor_functions:
.word v4t_early_abort
.word legacy_pabort
.word cpu_arm925_proc_init
.word cpu_arm925_proc_fin
.word cpu_arm925_reset
.word cpu_arm925_do_idle
.word cpu_arm925_dcache_clean_area
.word cpu_arm925_switch_mm
.word cpu_arm925_set_pte_ext
.size arm925_processor_functions, . - arm925_processor_functions
.section ".rodata"
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.type cpu_arm925_name, #object
cpu_arm925_name:
.asciz "ARM925T"
.size cpu_arm925_name, . - cpu_arm925_name
.align
.section ".proc.info.init", #alloc, #execinstr
.type __arm925_proc_info,#object
__arm925_proc_info:
.long 0x54029250
.long 0xfffffff0
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __arm925_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm925_name
.long arm925_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm925_cache_fns
.size __arm925_proc_info, . - __arm925_proc_info
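/* The ARM915 entry reuses the ARM925 methods; only the CPU id differs */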
.type __arm915_proc_info,#object
__arm915_proc_info:
.long 0x54029150
.long 0xfffffff0
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __arm925_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm925_name
.long arm925_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm925_cache_fns
.size __arm915_proc_info, . - __arm915_proc_info