/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif
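
/*
 * Illustrative sketch (not part of this header): code that may run in NMI
 * context is expected to check nmi_uaccess_okay() before touching user
 * memory and bail out when the loaded mm may be inconsistent, e.g.:
 *
 *	if (!nmi_uaccess_okay())
 *		return -EFAULT;
 *
 * and only then proceed with the user access.
 */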

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be called
 *  before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
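
/*
 * Illustrative sketch of the intended call sequence (not part of this header;
 * the real call sites live in mm/, e.g. the unmap and munmap paths):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	for each vma covering [start, end) {
 *		tlb_start_vma(&tlb, vma);
 *		... clear PTEs, recording each one with tlb_remove_tlb_entry()
 *		    and queueing the unhooked pages with tlb_remove_page() ...
 *		tlb_end_vma(&tlb, vma);
 *	}
 *	tlb_finish_mmu(&tlb);
 *
 * tlb_finish_mmu() issues the final TLB invalidate and frees whatever pages
 * are still queued, which preserves the unhook -> invalidate -> free ordering
 * described above.
 */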

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
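
/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit build):
 * sizeof(struct mmu_gather_batch) is 16 bytes, so MAX_GATHER_BATCH is
 * (4096 - 16) / 8 = 510 pages per batch, and MAX_GATHER_BATCH_COUNT is
 * 10000 / 510 = 19, i.e. at most 19 full batches (roughly 9700 pages)
 * are gathered before a flush is forced.
 */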

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
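
/*
 * Illustrative sketch (an assumption about arch usage, not a requirement of
 * this header): an architecture with size-specific invalidate instructions
 * can override tlb_flush() in its asm/tlb.h, before including this file, and
 * use the unmap shift as the invalidation stride:
 *
 *	#define tlb_flush tlb_flush
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
 *					     tlb_get_unmap_shift(tlb));
 *	}
 *
 * arch_flush_tlb_range() is a hypothetical helper used only for illustration;
 * real architectures (e.g. x86, arm64) use their own range-flush primitives.
 */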

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
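
/*
 * Illustrative sketch of a PTE zap loop (an approximation of what the mm code
 * in mm/memory.c does, not a literal copy):
 *
 *	tlb_change_page_size(tlb, PAGE_SIZE);
 *	for (; addr != end; pte++, addr += PAGE_SIZE) {
 *		pte_t ptent = ptep_get_and_clear_full(mm, addr, pte,
 *						      tlb->fullmm);
 *		tlb_remove_tlb_entry(tlb, pte, addr);
 *		if (pte_present(ptent))
 *			tlb_remove_page(tlb, pte_page(ptent));
 *	}
 *
 * i.e. the PTE is unhooked first, the range is recorded for the later TLB
 * invalidate, and only then is the page queued for freeing.
 */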

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it.  This
 * is definitely how x86 works, for example.  And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
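
/*
 * Illustrative sketch of an architecture hook (an assumption about a typical
 * arch, not something defined by this header): on an architecture whose page
 * tables are plain pages, the __pte_free_tlb() hook used above commonly ends
 * up as little more than:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 *					  unsigned long addr)
 *	{
 *		pgtable_pte_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 *
 * i.e. undo the page-table constructor and hand the page to the batching
 * code, which frees it only after the TLB (and, if needed, concurrent table
 * walkers) have been dealt with.
 */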

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */