From c017fabcb6eadce1828a6a87e78faae8e023ab07 Mon Sep 17 00:00:00 2001
From: zeusk
Date: Mon, 19 Mar 2012 02:17:25 +0530
Subject: [PATCH] Clean and invalidate only the specified cache range instead
 of a complete cache drain and invalidation

This should speed up a few applications quite a lot. Caught this while
working on assembly functions for lk, and read about the same issue in
8x50 kernels. The change should be tested before a release.
---
 arch/arm/mm/cache-v7.S | 43 +++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 28 deletions(-)

diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index e1bd9759..6459b18a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -154,37 +154,24 @@ ENTRY(v7_coherent_kern_range)
  *	- the Icache does not read data from the write buffer
  */
 ENTRY(v7_coherent_user_range)
- UNWIND(.fnstart	)
-	dcache_line_size r2, r3
-	sub	r3, r2, #1
-	bic	r0, r0, r3
-1:
- USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
-	dsb
- USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
-	add	r0, r0, r2
-2:
-	cmp	r0, r1
-	blo	1b
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-	dsb
-	isb
-	mov	pc, lr
-
-/*
- * Fault handling for the cache operation above. If the virtual address in r0
- * isn't mapped, just try the next page.
- */
-9001:
-	mov	r0, r0, lsr #12
-	mov	r0, r0, lsl #12
-	add	r0, r0, #4096
-	b	2b
- UNWIND(.fnend	)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+	dsb
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+	add	r0, r0, r2
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
+	dsb
+	isb
+	mov	pc, lr
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 
+
 /*
  * v7_flush_kern_dcache_page(kaddr)
  *
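
For reference, the retained loop is roughly equivalent to the following C
sketch. This is a minimal illustration, not kernel code: coherent_user_range()
and LINE_SIZE are made-up names, and a fixed 32-byte line stands in for the
CTR-derived value that the dcache_line_size macro reads at run time.

	/*
	 * Hypothetical C rendering of the range-based maintenance loop.
	 * ARMv7-only; the mcr encodings match the assembly above.
	 */
	#define LINE_SIZE 32UL	/* assumption: real code reads this from CTR */

	static void coherent_user_range(unsigned long start, unsigned long end)
	{
		unsigned long addr = start & ~(LINE_SIZE - 1);	/* bic r0, r0, r3 */

		for (; addr < end; addr += LINE_SIZE) {
			/* DCCMVAU: clean D-cache line by MVA to point of unification */
			asm volatile("mcr p15, 0, %0, c7, c11, 1" : : "r" (addr) : "memory");
			asm volatile("dsb" : : : "memory");
			/* ICIMVAU: invalidate I-cache line by MVA to PoU */
			asm volatile("mcr p15, 0, %0, c7, c5, 1" : : "r" (addr) : "memory");
		}
		/* BPIALL: invalidate branch predictor, then synchronise */
		asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0UL) : "memory");
		asm volatile("dsb; isb" : : : "memory");
	}

The cost of this path scales with the length of the range rather than with
the size of the cache, which is where the speedup comes from. Note that the
deleted 9001: handler was the fixup that let the routine skip an unmapped
user page: without the USER() annotations, a fault on an address in the
range is no longer fixed up, which is the main reason the change needs
testing before a release.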