#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context. Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them. That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *  -- paulus.
 */

/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs). We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table. Note, if this
 * function is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

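/*
 * Worked example (illustrative only, not used by any code): with the
 * skew above, consecutive contexts are spread 897 * 16 = 14352 VSIDs
 * apart, and consecutive segments within one context 0x111 apart:
 *
 *	CTX_TO_VSID(1, 0x00000000) == 0x003810	(14352)
 *	CTX_TO_VSID(1, 0x10000000) == 0x003921	(14625)
 *	CTX_TO_VSID(2, 0x00000000) == 0x007020	(28704)
 */
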
/*
 * The MPC8xx has only 16 contexts. We rotate through them on each
 * task switch. A better way would be to keep track of tasks that
 * own contexts, and implement an LRU usage. That way very active
 * tasks don't always have to pay the TLB reload overhead. The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry. Shared does not mean they
 * are not protected; just that the ASID comparison is not performed.
 *  -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts. If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 *  -- Dan
 */

/* Nothing needs to be done when a CPU enters lazy TLB mode on PPC. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
				  unsigned cpu)
{
}

#ifdef CONFIG_8xx
#define NO_CONTEXT	16
#define LAST_CONTEXT	15
#define FIRST_CONTEXT	0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

#else

/* PPC 6xx, 7xx, 7xxx CPUs */
#define NO_CONTEXT	((mm_context_t) -1)
#define LAST_CONTEXT	32767
#define FIRST_CONTEXT	1
#endif

/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass it along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron BDI2000 hardware debugger).
 */
extern void set_context(mm_context_t context, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits; for the 6xx case
 * that is 32768 bits, i.e. exactly one 4KB page.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only; we can't rely on this context
 * number being free, but it usually will be.
 */
extern mm_context_t next_mmu_context;

/*
 * If we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
#if LAST_CONTEXT < 30000
#define FEW_CONTEXTS	1
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
#endif

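/*
 * steal_context() itself lives in arch/ppc/mm; a minimal sketch of the
 * idea (the real implementation may differ in detail) is to take back
 * the context that would have been handed out next:
 *
 *	void steal_context(void)
 *	{
 *		struct mm_struct *mm;
 *
 *		if (next_mmu_context < FIRST_CONTEXT)
 *			next_mmu_context = FIRST_CONTEXT;
 *		mm = context_mm[next_mmu_context];
 *		flush_tlb_mm(mm);	// evict its TLB entries first
 *		destroy_context(mm);	// then return its context number
 *	}
 */
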
/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
#ifdef FEW_CONTEXTS
	/* Make sure a context is available, stealing one if necessary. */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
#endif
	/* Scan the bitmap, starting at the cached hint, for a free bit. */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	/* LAST_CONTEXT + 1 is a power of 2, so this wraps the hint. */
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
#ifdef FEW_CONTEXTS
	context_mm[ctx] = mm;
#endif
}

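/*
 * Example of the allocation above (illustrative only): if
 * next_mmu_context is 5 and bit 5 is clear, we take context 5 and the
 * hint becomes 6.  If every bit from 5 up is set, find_next_zero_bit()
 * returns a value past LAST_CONTEXT, so we wrap to 0 and rescan; this
 * assumes mmu_context_init() marks contexts below FIRST_CONTEXT as
 * permanently in use, so the scan can never hand them out.
 */
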
/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk,mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
#ifdef FEW_CONTEXTS
		atomic_inc(&nr_free_contexts);
#endif
	}
}

/*
 * Switch the MMU context to that of `next' when scheduling its task in.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk, int cpu)
{
	tsk->thread.pgdir = next->pgd;
	get_mmu_context(next);
	set_context(next->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	current->thread.pgdir = mm->pgd;
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

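/*
 * Typical lifetime of a context as driven by the generic mm code
 * (illustrative only; the hooks are the ones defined above):
 *
 *	init_new_context(tsk, mm);	// fork/exec: context = NO_CONTEXT
 *	switch_mm(prev, mm, tsk, cpu);	// first switch allocates a context
 *	...
 *	destroy_context(mm);		// exit: context returned to the map
 */
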
extern void mmu_context_init(void);

#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */