/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mmu.h>
#include "mmu_decl.h"

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * Since the hardware hash table functions as an extension of the
 * TLB as far as the Linux tables are concerned, flush it too.
 *    -- Cort
 */
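
/*
 * Illustrative sketch only, not part of this file: roughly how a
 * caller picks among these interfaces.  The helper name below is
 * hypothetical; the real call sites live in the generic
 * memory-management code.
 */
#if 0
static void example_flush_choices(struct vm_area_struct *vma)
{
	/* One translation changed: flush just that page. */
	flush_tlb_page(vma, vma->vm_start);

	/* A mapping disappeared: flush the covered range. */
	flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);

	/* The whole address space is stale: flush everything
	 * this mm may still have in the TLB or hash table. */
	flush_tlb_mm(vma->vm_mm);
}
#endif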

/*
 * Flush all tlb/hash table entries (except perhaps for those
 * mapping RAM starting at PAGE_OFFSET, since they never change).
 */
void
local_flush_tlb_all(void)
{
	/* aargh!!! */
	/*
	 * Just flush the kernel part of the address space
	 * (TASK_SIZE .. ~0UL); that's all the current callers of
	 * this require.  Eventually I hope to persuade the powers
	 * that be that we can and should dispense with
	 * flush_tlb_all().
	 *  -- paulus.
	 */
	local_flush_tlb_range(&init_mm, TASK_SIZE, ~0UL);

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif /* CONFIG_SMP */
}

/*
 * Flush all the (user) entries for the address space described
 * by mm.  We can't rely on mm->mmap describing all the entries
 * that might be in the hash table.
 */
void
local_flush_tlb_mm(struct mm_struct *mm)
{
	if (Hash == 0) {
		/* No hash table: invalidating the whole TLB is enough. */
		_tlbia();
		return;
	}

	if (mm->map_count) {
		struct vm_area_struct *mp;
		for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
			local_flush_tlb_range(mm, mp->vm_start, mp->vm_end);
	} else {
		/* No VMAs to walk: flush the entire user range. */
		local_flush_tlb_range(mm, 0, TASK_SIZE);
	}

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}

void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;
	pte_t *pte;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	/* Kernel addresses are mapped by init_mm, not the vma's mm. */
	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
	pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
	if (!pmd_none(*pmd)) {
		pte = pte_offset(pmd, vmaddr);
		/* Only PTEs with a hash-table entry need the HPTE destroyed. */
		if (pte_val(*pte) & _PAGE_HASHPTE)
			flush_hash_page(mm->context, vmaddr, pte);
	}
#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
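
/*
 * A minimal sketch (hypothetical caller, not from this file) of the
 * single-page case: after a user PTE is rewritten, the one-page
 * flush keeps the TLB and hash table coherent with the Linux tables.
 */
#if 0
static void example_update_pte(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep, pte_t new)
{
	set_pte(ptep, new);		/* update the Linux page table */
	flush_tlb_page(vma, addr);	/* then evict the stale translation */
}
#endif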

/*
 * For each address in the range, find the pte for the address
 * and check the _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void
local_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pmd_end;
	unsigned int ctx = mm->context;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	pmd = pmd_offset(pgd_offset(mm, start), start);
	do {
		/* End of the pgdir-sized region containing start;
		 * 0 if it wraps past the top of the address space. */
		pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (!pmd_none(*pmd)) {
			if (!pmd_end || pmd_end > end)
				pmd_end = end;
			pte = pte_offset(pmd, start);
			do {
				if ((pte_val(*pte) & _PAGE_HASHPTE) != 0)
					flush_hash_page(ctx, start, pte);
				start += PAGE_SIZE;
				++pte;
			} while (start && start < pmd_end);
		} else {
			start = pmd_end;
		}
		++pmd;
		/* 'start != 0' guards against wraparound at ~0UL. */
	} while (start && start < end);

#ifdef CONFIG_SMP
	smp_send_tlb_invalidate(0);
#endif
}
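
/*
 * Worked example of the pmd boundary arithmetic above, assuming the
 * usual 32-bit PPC two-level tables where PGDIR_SIZE = 0x400000 (4MB)
 * and PGDIR_MASK = ~0x3fffff: for start = 0xbff00000,
 * pmd_end = (0xbff00000 + 0x400000) & ~0x3fffff = 0xc0000000.
 * In the topmost pgdir, start = 0xffd00000 yields pmd_end = 0, which
 * is why the walk tests !pmd_end and 'start &&' to catch wraparound.
 */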