/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
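		/*
		 * flush_hash_pages() identifies the PTE by the physical
		 * address of the page of PTEs containing it (the same
		 * value a pmd entry holds).
		 */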
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
EXPORT_SYMBOL(flush_hash_entry);

/*
 * Called by ptep_set_access_flags; must flush the TLB on CPUs for
 * which the DSI handler can't just "fix up" the TLB on a write fault.
 * On hash-table CPUs the early return is safe because the hash fault
 * path will rebuild the entry from the updated Linux PTE.
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash != 0)
		return;
	_tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash == 0) {
		/*
		 * 603 needs to flush the whole TLB here since
		 * it doesn't use a hash table.
		 */
		_tlbia();
	}

	/* Push out batch of freed page tables */
	pte_free_finish();
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux tables are concerned, flush it too.
 *    -- Cort
 */
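
/*
 * Illustrative sketch (not a caller in this file): generic mm code
 * typically replaces a user PTE and then calls flush_tlb_page() to
 * evict the stale translation, e.g.
 *
 *	old = ptep_get_and_clear(mm, addr, ptep);
 *	set_pte_at(mm, addr, ptep, newpte);
 *	flush_tlb_page(vma, addr);
 */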
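/*
 * Flush the hash-table entries (or the whole TLB when there is no
 * hash table) for the given range of addresses in mm, walking the
 * Linux page tables one pmd at a time.
 */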
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
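	/* round end up to the last byte of its page */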
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
	for (;;) {
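		/* last address covered by this pmd's page of PTEs */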
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
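
/*
 * Illustrative sketch: a caller tearing down a kernel mapping would
 * clear the PTEs and then flush the range.  unmap_my_pages() here is
 * a hypothetical helper, not a real kernel function:
 *
 *	unmap_my_pages(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */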

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_sem.  It would also be safe from
	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
	 * but it seems dup_mmap is the only SMP case which gets here.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);

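/*
 * Flush the TLB (and any hash-table entry) for a single page.  Kernel
 * addresses have no vma of their own, so they are looked up in init_mm.
 */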
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE) ? vma->vm_mm : &init_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);