This should speed up a few applications quite a lot: we now clean and invalidate only the specified cache range instead of performing a complete cache drain and invalidation. I caught this while working on assembly functions for lk, and read about the issue on 8x50 kernels.
This should see some testing before it goes into a release.
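
To illustrate the idea, here is a minimal C sketch (not part of this commit) of what a ranged clean-to-PoU plus I-cache invalidate amounts to on ARMv7, using GCC inline asm. The fixed 32-byte line size and the coherent_range name are assumptions for illustration; the real assembly below reads the line size from the cache type register via dcache_line_size.

	/* Hedged sketch of a ranged clean + invalidate, ARMv7.
	 * Assumes a 32-byte cache line; the kernel code derives it from CTR. */
	#define CACHE_LINE 32

	static void coherent_range(unsigned long start, unsigned long end)
	{
		unsigned long addr = start & ~(unsigned long)(CACHE_LINE - 1);

		for (; addr < end; addr += CACHE_LINE) {
			/* DCCMVAU: clean D-cache line by MVA to point of unification */
			__asm__ volatile("mcr p15, 0, %0, c7, c11, 1" : : "r"(addr) : "memory");
			__asm__ volatile("dsb" ::: "memory");
			/* ICIMVAU: invalidate I-cache line by MVA */
			__asm__ volatile("mcr p15, 0, %0, c7, c5, 1" : : "r"(addr) : "memory");
		}
		/* BPIALL: invalidate branch predictor, then synchronize */
		__asm__ volatile("mcr p15, 0, %0, c7, c5, 6" : : "r"(0UL) : "memory");
		__asm__ volatile("dsb" ::: "memory");
		__asm__ volatile("isb" ::: "memory");
	}

Only the lines touched by [start, end) are walked, which is why a ranged operation beats draining the whole cache when the range is small.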
parent 1a10008f85
commit c017fabcb6
@@ -154,37 +154,24 @@ ENTRY(v7_coherent_kern_range)
  *	- the Icache does not read data from the write buffer
  */
 ENTRY(v7_coherent_user_range)
- UNWIND(.fnstart		)
-	dcache_line_size r2, r3
-	sub	r3, r2, #1
-	bic	r0, r0, r3
-1:
- USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
-	dsb
- USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
-	add	r0, r0, r2
-2:
-	cmp	r0, r1
-	blo	1b
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-	dsb
-	isb
-	mov	pc, lr
-
-/*
- * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
- */
-9001:
-	mov	r0, r0, lsr #12
-	mov	r0, r0, lsl #12
-	add	r0, r0, #4096
-	b	2b
- UNWIND(.fnend		)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+	dsb
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
+	dsb
+	isb
+	mov	pc, lr
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 
 
 /*
  * v7_flush_kern_dcache_page(kaddr)
  *
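
For reference, on ARM these routines are reached through the cpu_cache_fns coherent-range hooks; flush_icache_range() is the generic entry point that dispatches to v7_coherent_kern_range on ARMv7 cores. A hedged usage sketch follows — install_code() and its parameters are hypothetical, shown only to illustrate the typical caller pattern:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <asm/cacheflush.h>

	/* Hypothetical caller: copy code somewhere, then make the
	 * just-written range coherent so the CPU can execute it. */
	static void install_code(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);	/* write instructions as data (D-side) */
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + len);	/* sync the I-side */
	}

Only the [dst, dst + len) range is cleaned and invalidated, matching the ranged behaviour described in the commit message.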