#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */

#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
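
/*
 * For example, a task with context number 5 touching an address in region 2
 * (addr >> 61 == 2) gets region id (5 << 3) | 2 == 42.  Each of the regions 0-4
 * used by a task thus ends up with its own region id.
 */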

# ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>

struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};

extern struct ia64_ctx ia64_ctx;

extern void wrap_mmu_context (struct mm_struct *mm);

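/*
 * Nothing needs to happen when entering lazy-TLB mode: a kernel thread that
 * temporarily runs with the previous task's address space can keep using its
 * region ids as-is.
 */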
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

/*
 * When the context counter wraps around, all TLBs need to be flushed because an old
 * context number might have been reused.  This is signalled by the per-CPU
 * need_tlb_flush flag (reached through local_cpu_data), which is checked in the
 * routine below.  Called by activate_mm().  <efocht@ess.nec.de>
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);

	if (unlikely(local_cpu_data->need_tlb_flush)) {
		local_flush_tlb_all();
		local_cpu_data->need_tlb_flush = 0;
	}
}

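/*
 * Return the context number of MM, allocating a fresh one from ia64_ctx if the mm
 * doesn't have one yet (mm->context == 0 means "no context allocated").  When the
 * allocator reaches ia64_ctx.limit, wrap_mmu_context() recycles old context numbers.
 */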
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	mm_context_t context = mm->context;

	if (context)
		return context;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	{
		/* re-check, now that we've got the lock: */
		context = mm->context;
		if (context == 0) {
			if (ia64_ctx.next >= ia64_ctx.limit)
				wrap_mmu_context(mm);
			mm->context = context = ia64_ctx.next++;
		}
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
reload_context (mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4;

	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

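	/*
	 * Region-register layout as assembled below:
	 *   bit  0    : ve  (enable the VHPT walker for this region)
	 *   bits 2-7  : ps  (preferred page size)
	 *   bits 8-31 : rid (region id)
	 */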
	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
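	/* the huge-page region (mapped through rr4) gets the huge preferred page size: */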
	rr4 = (rr4 & (~(0xfcUL))) | (HPAGE_SHIFT << 2);
#endif

	ia64_set_rr(0x0000000000000000, rr0);
	ia64_set_rr(0x2000000000000000, rr1);
	ia64_set_rr(0x4000000000000000, rr2);
	ia64_set_rr(0x6000000000000000, rr3);
	ia64_set_rr(0x8000000000000000, rr4);
	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
	ia64_insn_group_barrier();
}

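/*
 * Load MM's context into the region registers.  If a concurrent wrap_mmu_context()
 * on another CPU hands MM a new context while we are loading the old one,
 * mm->context changes under us and the loop below simply retries.
 */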
static inline void
activate_context (struct mm_struct *mm)
{
	mm_context_t context;

	do {
		context = get_mmu_context(mm);
		reload_context(context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	delayed_tlb_flush();

	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
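	/* point the PT_BASE kernel register at the new page table so the low-level
	   TLB-miss handlers pick up the right pgd: */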
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

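/*
 * There is no difference on IA-64 between switching to an address space and
 * activating it, so switch_mm() simply maps to activate_mm(); the task and cpu
 * arguments are unused.
 */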
#define switch_mm(prev_mm,next_mm,next_task,cpu)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */