/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

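/* One flush batch per cpu: tb->mm is the address space the queued
 * addresses belong to, tb->active is nonzero while lazy MMU mode is
 * in effect, and tb->vaddrs[] holds up to TLB_BATCH_NR virtual
 * addresses waiting to be flushed.
 */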
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

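/* Drain this cpu's pending batch: knock the entries out of the user
 * TSB, then shoot them down from the TLB (by cross-call on SMP).
 */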
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

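	/* Nothing to do in the TLB unless the mm still owns a valid
	 * hardware context.  A single queued address takes the cheap
	 * one-page flush; larger batches use the pending-flush path.
	 */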
	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

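/* Entering lazy MMU mode: from here until arch_leave_lazy_mmu_mode(),
 * tlb_batch_add() queues addresses instead of flushing immediately.
 */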
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

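/* Leaving lazy MMU mode: push out anything still queued, then fall
 * back to flushing each page as it is unmapped.
 */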
void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

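/* Called when an established user pte is replaced or cleared.  Takes
 * care of the D-cache alias flush for dirty pages, then either queues
 * the address for a batched TLB flush or, outside lazy MMU mode,
 * flushes it right away.
 */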
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

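	/* The address we queue is page aligned, so bit 0 is free; it
	 * records that the mapping was executable, telling the flush
	 * code to demap the I-TLB entry as well.
	 */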
	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

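	/* On sun4u-style chips the D-cache is virtually indexed, so a
	 * dirty page-cache page mapped at a different cache color than
	 * its kernel linear alias (bit 13, the first index bit above
	 * the 8K page offset) can leave stale lines behind; flush it
	 * before the translation disappears.  sun4v has no such
	 * aliasing, hence the tlb_type check.
	 */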
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

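	/* The whole address space is being torn down (exit or exec);
	 * the caller does a full context flush, so there is nothing
	 * worth queueing per page.
	 */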
	if (fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}

	nr = tb->tlb_nr;

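	/* The batch holds addresses for a single mm; if a different mm
	 * shows up, drain what is already queued first.
	 */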
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

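	/* Not in lazy MMU mode: don't batch, just update the TSB and
	 * flush this one page immediately.
	 */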
	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

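	/* Queue the address, remembering which mm it belongs to on the
	 * first entry, and drain the batch once it fills up.
	 */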
	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}