/*
 *  linux/arch/arm/mm/cache-v6.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv6 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

#include "proc-macros.S"

#define HARVARD_CACHE
#define CACHE_LINE_SIZE		32
#define D_CACHE_LINE_SIZE	32
#define BTB_FLUSH_SIZE		8
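/*
 * The range operations below align their start and end addresses to a
 * cache line boundary by masking with the line size (32 bytes on ARM1136).
 * As an illustration only (the address is made up):
 *
 *	bic	r0, r0, #CACHE_LINE_SIZE - 1	@ 0x80123456 & ~31 == 0x80123440
 */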
/*
 *	v6_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
 *	This erratum is present in 1136, 1156 and 1176.  It does not affect the
 *	MPCore.
 *
 *	Registers:
 *	r0 - set to 0
 *	r1 - corrupted
 */
ENTRY(v6_flush_icache_all)
	mov	r0, #0
#ifdef CONFIG_ARM_ERRATA_411920
	mrs	r1, cpsr
	cpsid	ifa				@ disable interrupts
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
	msr	cpsr_cx, r1			@ restore interrupts
	.rept	11				@ ARM Ltd recommends at least
	nop					@ 11 NOPs
	.endr
#else
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
#endif
	mov	pc, lr
ENDPROC(v6_flush_icache_all)

/*
 *	v6_flush_kern_cache_all()
 *
 *	Flush the entire cache.
 */
ENTRY(v6_flush_kern_cache_all)
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ tail call; returns to our caller
#endif
#else
	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
#endif
	mov	pc, lr

/*
 *	v6_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v6_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v6_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags - vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v6_flush_user_cache_range)
	mov	pc, lr

/*
 *	v6_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v6_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v6_coherent_user_range)
 UNWIND(.fnstart		)
#ifdef HARVARD_CACHE
	bic	r0, r0, #CACHE_LINE_SIZE - 1
1:
 USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
	add	r0, r0, #CACHE_LINE_SIZE
2:
	cmp	r0, r1
	blo	1b
#endif
	mov	r0, #0
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#ifndef CONFIG_ARM_ERRATA_411920
	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
#else
	b	v6_flush_icache_all		@ tail call; returns to our caller
#endif
#else
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
#endif
	mov	pc, lr

/*
 * Fault handling for the cache operation above. If the virtual address in r0
 * isn't mapped, just try the next page.
 */
9001:
	mov	r0, r0, lsr #12
	mov	r0, r0, lsl #12			@ round r0 down to its 4K page

	add	r0, r0, #4096			@ step to the next page
	b	2b				@ resume the loop at the cmp
 UNWIND(.fnend		)
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)

/*
 *	v6_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v6_flush_kern_dcache_area)
	add	r1, r0, r1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
#ifdef HARVARD_CACHE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
#endif
	mov	pc, lr

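/*
 * The v6_dma_* helpers below back the streaming DMA API.  As a summary of
 * the dispatch done by v6_dma_map_area/v6_dma_unmap_area further down (not
 * an additional requirement):
 *
 *   DMA_FROM_DEVICE    invalidate           (v6_dma_inv_range)
 *   DMA_TO_DEVICE      clean                (v6_dma_clean_range)
 *   DMA_BIDIRECTIONAL  clean on map and invalidate on unmap; with
 *                      CONFIG_DMA_CACHE_RWFO, clean+invalidate on map
 *                      (v6_dma_flush_range) and nothing on unmap
 *
 * The CONFIG_DMA_CACHE_RWFO paths also read/write the buffer first
 * ("read/write for ownership") so that this CPU owns the lines before the
 * maintenance operations are issued on SMP.
 */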
/*
 *	v6_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_inv_range:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	tst	r0, #D_CACHE_LINE_SIZE - 1
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	tst	r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrneb	r2, [r1, #-1]			@ read for ownership
	strneb	r2, [r1, #-1]			@ write for ownership
#endif
	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
#else
	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
#endif
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
#else
	mcr	p15, 0, r0, c7, c7, 1		@ invalidate unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlo	r2, [r0]			@ read for ownership
	strlo	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v6_dma_clean_range:
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_DMA_CACHE_RWFO
	ldr	r2, [r0]			@ read for ownership
#endif
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
#else
	mcr	p15, 0, r0, c7, c11, 1		@ clean unified line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	v6_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v6_dma_flush_range)
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrb	r2, [r0]			@ read for ownership
	strb	r2, [r0]			@ write for ownership
#endif
	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
#else
	mcr	p15, 0, r0, c7, c15, 1		@ clean & invalidate line
#endif
	add	r0, r0, #D_CACHE_LINE_SIZE
	cmp	r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
	ldrlob	r2, [r0]			@ read for ownership
	strlob	r2, [r0]			@ write for ownership
#endif
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
	b	v6_dma_clean_range
#else
	teq	r2, #DMA_TO_DEVICE
	beq	v6_dma_clean_range
	b	v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v6_dma_inv_range
#endif
	mov	pc, lr
ENDPROC(v6_dma_unmap_area)

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v6
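/*
 * For reference, define_cache_functions (proc-macros.S) emits the
 * v6_cache_fns function-pointer table that <asm/cacheflush.h> dispatches
 * through.  Roughly as sketched below; the authoritative member list is
 * struct cpu_cache_fns and the macro itself, and it varies between kernel
 * versions, so treat this as an illustration only:
 *
 *	.type	v6_cache_fns, #object
 *	ENTRY(v6_cache_fns)
 *		.long	v6_flush_icache_all
 *		.long	v6_flush_kern_cache_all
 *		.long	v6_flush_user_cache_all
 *		.long	v6_flush_user_cache_range
 *		.long	v6_coherent_kern_range
 *		.long	v6_coherent_user_range
 *		.long	v6_flush_kern_dcache_area
 *		.long	v6_dma_map_area
 *		.long	v6_dma_unmap_area
 *		.long	v6_dma_flush_range
 *	.size	v6_cache_fns, . - v6_cache_fns
 */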