/*
 * linux/include/asm-arm/proc-armo/cache.h
 *
 * Copyright (C) 1999-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Cache handling for 26-bit ARM processors.
 */
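
/*
 * The 26-bit processors either have no cache (ARM250) or a small unified
 * cache that needs no explicit maintenance from the VM layer (ARM3, see
 * the note below), so all of the cache handling hooks here are no-ops.
 */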
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(mm,start,end) do { } while (0)
#define flush_cache_page(vma,vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)

#define invalidate_dcache_range(start,end) do { } while (0)
#define clean_dcache_range(start,end) do { } while (0)
#define flush_dcache_range(start,end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define clean_dcache_entry(_s) do { } while (0)
#define clean_cache_entry(_start) do { } while (0)

#define flush_icache_range(start,end) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)

/* DAG: ARM3 will flush its cache on MEMC updates anyway(?), so don't bother */
#define clean_cache_area(_start,_size) do { } while (0)

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 */
#define flush_tlb_all() memc_update_all()
#define flush_tlb_mm(mm) memc_update_mm(mm)
#define flush_tlb_range(mm,start,end) \
	do { memc_update_mm(mm); (void)(start); (void)(end); } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
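
/*
 * Illustrative sketch (example_unmap is hypothetical, not a kernel
 * function): the generic VM code drives these hooks when it changes the
 * page tables, roughly in this order:
 *
 *	static void example_unmap(struct vm_area_struct *vma,
 *				  unsigned long start, unsigned long end)
 *	{
 *		flush_cache_range(vma->vm_mm, start, end);   (no-op here)
 *		... clear the page table entries for [start, end) ...
 *		flush_tlb_range(vma->vm_mm, start, end);     (reloads the MEMC map)
 *	}
 *
 * On MEMC the "TLB" is the software-loaded page mapping itself, so
 * flush_tlb_all(), flush_tlb_mm() and flush_tlb_range() simply rebuild it
 * from the page tables via the memc_update_*() helpers below, while
 * flush_tlb_page() can remain a no-op.
 */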

/*
 * The following functions handle the weird MEMC chip
 */
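/*
 * Background: MEMC has no hardware page-table walker, so the kernel's page
 * tables are only a software description of the address space.  Whenever
 * they change, the MEMC page mapping has to be regenerated from them:
 * cpu_memc_update_*() rebuild a given mm's MEMC mapping from its page
 * tables, and _set_pgd() loads the currently active mm's mapping into the
 * hardware.
 */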
static inline void memc_update_all(void)
{
	struct task_struct *p;

	cpu_memc_update_all(init_mm.pgd);
	for_each_task(p) {
		if (!p->mm)
			continue;
		cpu_memc_update_all(p->mm->pgd);
	}
	processor._set_pgd(current->active_mm->pgd);
}

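/*
 * Regenerate the MEMC mapping for a single mm, and reload it into the
 * hardware if that mm is the one currently active on the CPU.
 */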
static inline void memc_update_mm(struct mm_struct *mm)
{
	cpu_memc_update_all(mm->pgd);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

static inline void
memc_clear(struct mm_struct *mm, struct page *page)
{
	cpu_memc_update_entry(mm->pgd, (unsigned long) page_address(page), 0);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

/*
 * Update the MEMC entry for vaddr from the new pte, and reload the
 * mapping if the mm is currently active.
 */
static inline void
memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
{
	cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

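/*
 * Called by the generic VM code after a page-table entry has been
 * established or changed, so the new translation can be pushed into the
 * MEMC mapping straight away.
 */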
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;
	memc_update_addr(mm, pte, addr);
}