/*
 * arch/xtensa/mm/misc.S
 *
 * Miscellaneous assembly functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 */


#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
#include <asm/tlbflush.h>


/*
 * clear_page and clear_user_page are the same for non-cache-aliased configs.
 *
 * clear_page (unsigned long page)
 *                    a2
 *
 * Zeroes one PAGE_SIZE page, 32 bytes (eight words) per loop iteration.
 * a3 holds the zero constant; a7 is the loop counter used by __loopi.
 */

ENTRY(clear_page)
	entry	a1, 16

	movi	a3, 0
	__loopi	a2, a7, PAGE_SIZE, 32
	s32i	a3, a2, 0
	s32i	a3, a2, 4
	s32i	a3, a2, 8
	s32i	a3, a2, 12
	s32i	a3, a2, 16
	s32i	a3, a2, 20
	s32i	a3, a2, 24
	s32i	a3, a2, 28
	__endla	a2, a7, 32

	retw

/*
 * copy_page and copy_user_page are the same for non-cache-aliased configs.
 *
 * copy_page (void *to, void *from)
 *               a2         a3
 *
 * Copies one PAGE_SIZE page, 32 bytes per iteration; loads are paired
 * (a8/a9) ahead of their stores to overlap load latency with the stores.
 */

ENTRY(copy_page)
	entry	a1, 16

	__loopi	a2, a4, PAGE_SIZE, 32

	l32i	a8, a3, 0
	l32i	a9, a3, 4
	s32i	a8, a2, 0
	s32i	a9, a2, 4

	l32i	a8, a3, 8
	l32i	a9, a3, 12
	s32i	a8, a2, 8
	s32i	a9, a2, 12

	l32i	a8, a3, 16
	l32i	a9, a3, 20
	s32i	a8, a2, 16
	s32i	a9, a2, 20

	l32i	a8, a3, 24
	l32i	a9, a3, 28
	s32i	a8, a2, 24
	s32i	a9, a2, 28

	addi	a2, a2, 32
	addi	a3, a3, 32

	__endl	a2, a4

	retw

#ifdef CONFIG_MMU
/*
 * If we have to deal with cache aliasing, we use temporary memory mappings
 * to ensure that the source and destination pages have the same color as
 * the virtual address. We use way 0 and 1 for temporary mappings in such cases.
 *
 * The temporary DTLB entries shouldn't be flushed by interrupts, but are
 * flushed by preemptive task switches.
Special code in the 95 * fast_second_level_miss handler re-established the temporary mapping. 96 * It requires that the PPNs for the destination and source addresses are 97 * in a6, and a7, respectively. 98 */ 99 100/* TLB miss exceptions are treated special in the following region */ 101 102ENTRY(__tlbtemp_mapping_start) 103 104#if (DCACHE_WAY_SIZE > PAGE_SIZE) 105 106/* 107 * clear_user_page (void *addr, unsigned long vaddr, struct page *page) 108 * a2 a3 a4 109 */ 110 111ENTRY(clear_user_page) 112 entry a1, 32 113 114 /* Mark page dirty and determine alias. */ 115 116 movi a7, (1 << PG_ARCH_1) 117 l32i a5, a4, PAGE_FLAGS 118 xor a6, a2, a3 119 extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER 120 extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 121 or a5, a5, a7 122 slli a3, a3, PAGE_SHIFT 123 s32i a5, a4, PAGE_FLAGS 124 125 /* Skip setting up a temporary DTLB if not aliased. */ 126 127 beqz a6, 1f 128 129 /* Invalidate kernel page. */ 130 131 mov a10, a2 132 call8 __invalidate_dcache_page 133 134 /* Setup a temporary DTLB with the color of the VPN */ 135 136 movi a4, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) 137 movi a5, TLBTEMP_BASE_1 # virt 138 add a6, a2, a4 # ppn 139 add a2, a5, a3 # add 'color' 140 141 wdtlb a6, a2 142 dsync 143 1441: movi a3, 0 145 __loopi a2, a7, PAGE_SIZE, 32 146 s32i a3, a2, 0 147 s32i a3, a2, 4 148 s32i a3, a2, 8 149 s32i a3, a2, 12 150 s32i a3, a2, 16 151 s32i a3, a2, 20 152 s32i a3, a2, 24 153 s32i a3, a2, 28 154 __endla a2, a7, 32 155 156 bnez a6, 1f 157 retw 158 159 /* We need to invalidate the temporary idtlb entry, if any. */ 160 1611: addi a2, a2, -PAGE_SIZE 162 idtlb a2 163 dsync 164 165 retw 166 167/* 168 * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) 169 * a2 a3 a4 a5 170 */ 171 172ENTRY(copy_user_page) 173 174 entry a1, 32 175 176 /* Mark page dirty and determine alias for destination. 
*/ 177 178 movi a8, (1 << PG_ARCH_1) 179 l32i a9, a5, PAGE_FLAGS 180 xor a6, a2, a4 181 xor a7, a3, a4 182 extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER 183 extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER 184 extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER 185 or a9, a9, a8 186 slli a4, a4, PAGE_SHIFT 187 s32i a9, a5, PAGE_FLAGS 188 movi a5, -PAGE_OFFSET + (PAGE_KERNEL | _PAGE_HW_WRITE) 189 190 beqz a6, 1f 191 192 /* Invalidate dcache */ 193 194 mov a10, a2 195 call8 __invalidate_dcache_page 196 197 /* Setup a temporary DTLB with a matching color. */ 198 199 movi a8, TLBTEMP_BASE_1 # base 200 add a6, a2, a5 # ppn 201 add a2, a8, a4 # add 'color' 202 203 wdtlb a6, a2 204 dsync 205 206 /* Skip setting up a temporary DTLB for destination if not aliased. */ 207 2081: beqz a7, 1f 209 210 /* Setup a temporary DTLB with a matching color. */ 211 212 movi a8, TLBTEMP_BASE_2 # base 213 add a7, a3, a5 # ppn 214 add a3, a8, a4 215 addi a8, a3, 1 # way1 216 217 wdtlb a7, a8 218 dsync 219 2201: __loopi a2, a4, PAGE_SIZE, 32 221 222 l32i a8, a3, 0 223 l32i a9, a3, 4 224 s32i a8, a2, 0 225 s32i a9, a2, 4 226 227 l32i a8, a3, 8 228 l32i a9, a3, 12 229 s32i a8, a2, 8 230 s32i a9, a2, 12 231 232 l32i a8, a3, 16 233 l32i a9, a3, 20 234 s32i a8, a2, 16 235 s32i a9, a2, 20 236 237 l32i a8, a3, 24 238 l32i a9, a3, 28 239 s32i a8, a2, 24 240 s32i a9, a2, 28 241 242 addi a2, a2, 32 243 addi a3, a3, 32 244 245 __endl a2, a4 246 247 /* We need to invalidate any temporary mapping! 
*/ 248 249 bnez a6, 1f 250 bnez a7, 2f 251 retw 252 2531: addi a2, a2, -PAGE_SIZE 254 idtlb a2 255 dsync 256 bnez a7, 2f 257 retw 258 2592: addi a3, a3, -PAGE_SIZE+1 260 idtlb a3 261 dsync 262 263 retw 264 265#endif 266 267#if (DCACHE_WAY_SIZE > PAGE_SIZE) 268 269/* 270 * void __flush_invalidate_dcache_page_alias (addr, phys) 271 * a2 a3 272 */ 273 274ENTRY(__flush_invalidate_dcache_page_alias) 275 entry sp, 16 276 277 movi a7, 0 # required for exception handler 278 addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) 279 mov a4, a2 280 wdtlb a6, a2 281 dsync 282 283 ___flush_invalidate_dcache_page a2 a3 284 285 idtlb a4 286 dsync 287 288 retw 289 290#endif 291 292ENTRY(__tlbtemp_mapping_itlb) 293 294#if (ICACHE_WAY_SIZE > PAGE_SIZE) 295 296ENTRY(__invalidate_icache_page_alias) 297 entry sp, 16 298 299 addi a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE) 300 mov a4, a2 301 witlb a6, a2 302 isync 303 304 ___invalidate_icache_page a2 a3 305 306 iitlb a4 307 isync 308 retw 309 310#endif 311 312/* End of special treatment in tlb miss exception */ 313 314ENTRY(__tlbtemp_mapping_end) 315#endif /* CONFIG_MMU 316 317/* 318 * void __invalidate_icache_page(ulong start) 319 */ 320 321ENTRY(__invalidate_icache_page) 322 entry sp, 16 323 324 ___invalidate_icache_page a2 a3 325 isync 326 327 retw 328 329/* 330 * void __invalidate_dcache_page(ulong start) 331 */ 332 333ENTRY(__invalidate_dcache_page) 334 entry sp, 16 335 336 ___invalidate_dcache_page a2 a3 337 dsync 338 339 retw 340 341/* 342 * void __flush_invalidate_dcache_page(ulong start) 343 */ 344 345ENTRY(__flush_invalidate_dcache_page) 346 entry sp, 16 347 348 ___flush_invalidate_dcache_page a2 a3 349 350 dsync 351 retw 352 353/* 354 * void __flush_dcache_page(ulong start) 355 */ 356 357ENTRY(__flush_dcache_page) 358 entry sp, 16 359 360 ___flush_dcache_page a2 a3 361 362 dsync 363 retw 364 365/* 366 * void __invalidate_icache_range(ulong start, ulong size) 367 */ 368 369ENTRY(__invalidate_icache_range) 370 entry sp, 16 371 372 
___invalidate_icache_range a2 a3 a4 373 isync 374 375 retw 376 377/* 378 * void __flush_invalidate_dcache_range(ulong start, ulong size) 379 */ 380 381ENTRY(__flush_invalidate_dcache_range) 382 entry sp, 16 383 384 ___flush_invalidate_dcache_range a2 a3 a4 385 dsync 386 387 retw 388 389/* 390 * void _flush_dcache_range(ulong start, ulong size) 391 */ 392 393ENTRY(__flush_dcache_range) 394 entry sp, 16 395 396 ___flush_dcache_range a2 a3 a4 397 dsync 398 399 retw 400 401/* 402 * void _invalidate_dcache_range(ulong start, ulong size) 403 */ 404 405ENTRY(__invalidate_dcache_range) 406 entry sp, 16 407 408 ___invalidate_dcache_range a2 a3 a4 409 410 retw 411 412/* 413 * void _invalidate_icache_all(void) 414 */ 415 416ENTRY(__invalidate_icache_all) 417 entry sp, 16 418 419 ___invalidate_icache_all a2 a3 420 isync 421 422 retw 423 424/* 425 * void _flush_invalidate_dcache_all(void) 426 */ 427 428ENTRY(__flush_invalidate_dcache_all) 429 entry sp, 16 430 431 ___flush_invalidate_dcache_all a2 a3 432 dsync 433 434 retw 435 436/* 437 * void _invalidate_dcache_all(void) 438 */ 439 440ENTRY(__invalidate_dcache_all) 441 entry sp, 16 442 443 ___invalidate_dcache_all a2 a3 444 dsync 445 446 retw 447 448