/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * For the fast tlb miss handlers, we currently keep a per-cpu array
 * of pointers to the current pgd for each processor.  Also, the
 * processor id is stuffed into the context register.  This should be
 * changed to use the processor id via current->processor, where
 * current is stored in watchhi/lo.  The context register should then
 * be used to contiguously map the page tables.
 */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)
#define TLBMISS_HANDLER_SETUP() \
	write_c0_context(((long)(&pgd_current[smp_processor_id()])) << 23); \
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
extern unsigned long pgd_current[];

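/*
 * Note on the << 23 above: it parks the low bits of
 * &pgd_current[smp_processor_id()] in the PTEBase field of c0_context,
 * where the assembler TLB refill handler can recover them with a plain
 * shift and combine them with the link-time address of pgd_current,
 * instead of computing the processor id on every refill.  Roughly (an
 * illustrative C restatement of the fast path, not code that runs;
 * pgd_base stands for the upper address bits known at link time):
 *
 *	pgd_t *pgd = *(pgd_t **)(pgd_base | (read_c0_context() >> 23));
 */
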
#define cpu_context(cpu, mm)	((mm)->context[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_RM9000)

#define ASID_INC	0x1
#define ASID_MASK	0xfff

#else /* FIXME: not correct for R6000, R8000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#endif

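/*
 * Background for the values above (a sketch from the EntryHi layouts;
 * the FIXME stands): R3000-class CPUs keep their 6-bit ASID in EntryHi
 * bits 11:6, hence the increment of 0x40 and the mask 0xfc0, while
 * R4000-class CPUs keep an 8-bit ASID in bits 7:0 and the RM9000
 * family has a wider 12-bit ASID field.
 */
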
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
                                  unsigned cpu)
{
}

/*
 * All bits of the context value not used by the hardware ASID are
 * treated as a software ASID extension (the "version").
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

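/*
 * Worked example for the default 8-bit ASID above:
 *
 *	ASID_MASK          == 0xff
 *	ASID_VERSION_MASK  == ~0xfful	(everything above the ASID)
 *	ASID_FIRST_VERSION == 0x100
 *
 * so cpu_context(cpu, mm) holds (version << 8) | asid.  For R3000 the
 * extra (ASID_MASK-1) term also keeps the unused sub-ASID bits 5:0
 * out of the version.
 */
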
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

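/*
 * Example of a rollover with the 8-bit layout: if asid_cache(cpu) is
 * 0x1ff, the increment yields 0x200, whose ASID bits are zero, so the
 * whole TLB is flushed and 0x200 opens the next cycle.  Every mm still
 * tagged with version 0x1 now fails the version check in switch_mm()
 * and will be handed a fresh ASID before it runs again.
 */
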
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++)
		cpu_context(i, mm) = 0;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk, unsigned cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);

	write_c0_entryhi(cpu_context(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	clear_bit(cpu, &prev->cpu_vm_mask);
	set_bit(cpu, &next->cpu_vm_mask);

	local_irq_restore(flags);
}

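/*
 * The cpu_vm_mask update above matters on SMP: the cross-CPU tlb flush
 * paths consult it to decide which processors still hold live mappings
 * for an mm, so clearing prev's bit here keeps this CPU from being
 * sent needless flush IPIs for an mm it no longer runs.
 */
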
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, cpu);

	write_c0_entryhi(cpu_context(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	clear_bit(cpu, &prev->cpu_vm_mask);
	set_bit(cpu, &next->cpu_vm_mask);

	local_irq_restore(flags);
}

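/*
 * Unlike switch_mm(), activate_mm() skips the version check: it is
 * used when a brand-new mm replaces current->mm (e.g. on exec), so
 * the mm cannot own a valid ASID on this cpu yet and a new one is
 * simply allocated.
 */
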
/*
 * If mm is currently active_mm, we can't really drop it.  Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (test_bit(cpu, &mm->cpu_vm_mask)) {
		get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}

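/*
 * drop_mmu_context() gives the tlb flush code a cheap big hammer:
 * rather than hunting down every entry tagged with the old ASID, a
 * caller such as a local flush_tlb_mm()/flush_tlb_range()
 * implementation can simply retire the ASID, e.g. (illustrative):
 *
 *	drop_mmu_context(mm, smp_processor_id());
 *
 * Stale entries then become unreachable because nothing runs with
 * the old ASID anymore.
 */
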
#endif /* _ASM_MMU_CONTEXT_H */