Lines matching refs: tlb (mm/mmu_gather.c)

tlb_next_batch():
   18  static bool tlb_next_batch(struct mmu_gather *tlb)
   23          if (tlb->delayed_rmap && tlb->active != &tlb->local)
   26          batch = tlb->active;
   28          tlb->active = batch->next;
   32          if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
   39          tlb->batch_count++;
   44          tlb->active->next = batch;
   45          tlb->active = batch;

tlb_flush_rmaps():
   73  void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
   75          if (!tlb->delayed_rmap)
   78          tlb_flush_rmap_batch(&tlb->local, vma);
   79          if (tlb->active != &tlb->local)
   80                  tlb_flush_rmap_batch(tlb->active, vma);
   81          tlb->delayed_rmap = 0;
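
The guard at line 79 is the point of tlb_flush_rmaps(): the embedded local batch is always flushed, and the active batch is flushed only when it is a distinct allocation, so the single-batch common case does not visit the same entries twice. A minimal compilable sketch of that shape; struct gather, struct batch and flush_rmap_batch() are hypothetical stand-ins, not the kernel's types:

#include <stdio.h>

/* Hypothetical stand-ins for struct mmu_gather and its batches. */
struct batch { int nr; };
struct gather {
        struct batch local;     /* embedded first batch */
        struct batch *active;   /* == &local until it fills up */
        int delayed_rmap;
};

static void flush_rmap_batch(struct batch *b)
{
        printf("flush batch %p, %d entries\n", (void *)b, b->nr);
}

static void flush_rmaps(struct gather *g)
{
        if (!g->delayed_rmap)
                return;
        flush_rmap_batch(&g->local);
        if (g->active != &g->local)     /* skip when only local is in use */
                flush_rmap_batch(g->active);
        g->delayed_rmap = 0;
}

int main(void)
{
        struct gather g = { .local = { .nr = 3 }, .delayed_rmap = 1 };
        g.active = &g.local;            /* common case: one flush only */
        flush_rmaps(&g);
        return 0;
}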
tlb_batch_pages_flush():
   85  static void tlb_batch_pages_flush(struct mmu_gather *tlb)
   89          for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
  105          tlb->active = &tlb->local;

tlb_batch_list_free():
  108  static void tlb_batch_list_free(struct mmu_gather *tlb)
  112          for (batch = tlb->local.next; batch; batch = next) {
  116          tlb->local.next = NULL;

__tlb_remove_page_size():
  119  bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
  123          VM_BUG_ON(!tlb->end);
  126          VM_WARN_ON(tlb->page_size != page_size);
  129          batch = tlb->active;
  136          if (!tlb_next_batch(tlb))
  138          batch = tlb->active;
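
The fragments above implement a growable chain of page batches: __tlb_remove_page_size() appends to the active batch and, when it fills, tlb_next_batch() either reuses an already-allocated successor or allocates a new one up to MAX_GATHER_BATCH_COUNT; if no batch can be had, the caller is told to flush. tlb_batch_pages_flush() drains the chain and rewinds to the embedded batch, and tlb_batch_list_free() releases it. The following self-contained user-space model mirrors that control flow; every name, the tiny sizes, and the printf bookkeeping are illustrative stand-ins, not kernel code:

#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX        4   /* slots per batch; tiny for demonstration */
#define BATCH_COUNT_MAX  2   /* cap on extra batches, like MAX_GATHER_BATCH_COUNT */

struct batch {
        struct batch *next;
        int nr, max;
        void *pages[BATCH_MAX];
};

struct gather {
        struct batch *active;
        struct batch local;
        int batch_count;
};

static void gather_init(struct gather *g)
{
        g->local.next = NULL;
        g->local.nr = 0;
        g->local.max = BATCH_MAX;
        g->active = &g->local;
        g->batch_count = 0;
}

/* Mirrors tlb_next_batch(): reuse an already-allocated successor,
 * otherwise allocate one unless the cap has been reached. */
static bool next_batch(struct gather *g)
{
        struct batch *b = g->active;

        if (b->next) {
                g->active = b->next;
                return true;
        }
        if (g->batch_count == BATCH_COUNT_MAX)
                return false;
        b = malloc(sizeof(*b));
        if (!b)
                return false;
        g->batch_count++;
        b->next = NULL;
        b->nr = 0;
        b->max = BATCH_MAX;
        g->active->next = b;
        g->active = b;
        return true;
}

/* Mirrors __tlb_remove_page_size(): store the page, and return true
 * when the caller must flush because no batch space is left. */
static bool remove_page(struct gather *g, void *page)
{
        struct batch *b = g->active;

        b->pages[b->nr++] = page;
        if (b->nr == b->max)
                return !next_batch(g);
        return false;
}

/* Mirrors tlb_batch_pages_flush(): drain every batch, rewind to local. */
static void pages_flush(struct gather *g)
{
        for (struct batch *b = &g->local; b && b->nr; b = b->next) {
                printf("releasing %d pages\n", b->nr);
                b->nr = 0;
        }
        g->active = &g->local;
}

/* Mirrors tlb_batch_list_free(): free the chain hanging off local. */
static void list_free(struct gather *g)
{
        for (struct batch *b = g->local.next, *next; b; b = next) {
                next = b->next;
                free(b);
        }
        g->local.next = NULL;
}

int main(void)
{
        struct gather g;

        gather_init(&g);
        for (long i = 1; i <= 14; i++)
                if (remove_page(&g, (void *)i))
                        pages_flush(&g);        /* forced flush when full */
        pages_flush(&g);
        list_free(&g);
        return 0;
}

Note that, as in the kernel code, the page that fills the last batch is still stored before the caller is asked to flush, and a flush keeps the allocated chain around so later gathers can reuse it via the b->next check.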
tlb_table_invalidate():
  229  static inline void tlb_table_invalidate(struct mmu_gather *tlb)
  237          tlb_flush_mmu_tlbonly(tlb);

tlb_table_flush():
  247  static void tlb_table_flush(struct mmu_gather *tlb)
  249          struct mmu_table_batch **batch = &tlb->batch;
  252          tlb_table_invalidate(tlb);

tlb_remove_table():
  258  void tlb_remove_table(struct mmu_gather *tlb, void *table)
  260          struct mmu_table_batch **batch = &tlb->batch;
  265          tlb_table_invalidate(tlb);
  274          tlb_table_flush(tlb);

tlb_table_init():
  277  static inline void tlb_table_init(struct mmu_gather *tlb)
  279          tlb->batch = NULL;

tlb_table_flush() / tlb_table_init() (no-op stubs in the alternate, config-dependent branch):
  284  static inline void tlb_table_flush(struct mmu_gather *tlb) { }
  285  static inline void tlb_table_init(struct mmu_gather *tlb) { }
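
tlb_remove_table() (lines 258-274) shows a defensive allocation pattern for page-table pages: the batch page is allocated lazily, and if that allocation fails the code invalidates immediately and frees the single table right away instead of batching it. A compilable model of that batch-or-fallback shape, with hypothetical names and a printf standing in for the real deferred free:

#include <stdlib.h>
#include <stdio.h>

#define TABLE_BATCH_MAX 4   /* stand-in for MAX_TABLE_BATCH */

struct table_batch {
        int nr;
        void *tables[TABLE_BATCH_MAX];
};

struct gather {
        struct table_batch *batch;      /* NULL until first table queued */
};

static void invalidate(void) { puts("TLB invalidate"); }

static void table_flush(struct gather *g)
{
        if (g->batch) {
                invalidate();
                printf("freeing %d tables after grace period\n", g->batch->nr);
                free(g->batch);
                g->batch = NULL;
        }
}

/* Mirrors tlb_remove_table(): allocate the batch lazily; if that
 * fails, invalidate and free this one table synchronously. */
static void remove_table(struct gather *g, void *table)
{
        if (!g->batch) {
                g->batch = malloc(sizeof(*g->batch));
                if (!g->batch) {
                        invalidate();
                        printf("freeing table %p synchronously\n", table);
                        return;
                }
                g->batch->nr = 0;
        }
        g->batch->tables[g->batch->nr++] = table;
        if (g->batch->nr == TABLE_BATCH_MAX)
                table_flush(g);
}

int main(void)
{
        struct gather g = { .batch = NULL };
        for (long i = 1; i <= 6; i++)
                remove_table(&g, (void *)i);
        table_flush(&g);        /* drain the partial batch */
        return 0;
}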
tlb_flush_mmu_free():
  289  static void tlb_flush_mmu_free(struct mmu_gather *tlb)
  291          tlb_table_flush(tlb);
  293          tlb_batch_pages_flush(tlb);

tlb_flush_mmu():
  297  void tlb_flush_mmu(struct mmu_gather *tlb)
  299          tlb_flush_mmu_tlbonly(tlb);
  300          tlb_flush_mmu_free(tlb);
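
The call order in tlb_flush_mmu() (lines 299-300) is the central invariant of the file: translations are invalidated first, and only then are the gathered pages and tables actually freed, so no CPU can reach a page through a stale TLB entry once that page becomes eligible for reuse. In miniature, with stub functions standing in for the real flush and free steps:

#include <stdio.h>

struct gather { int unused; };

/* Stand-ins for tlb_flush_mmu_tlbonly() and tlb_flush_mmu_free(). */
static void flush_tlb_only(struct gather *g) { (void)g; puts("1: invalidate TLB"); }
static void flush_free(struct gather *g)     { (void)g; puts("2: free gathered pages"); }

/* Mirrors tlb_flush_mmu(): invalidate before freeing, never after. */
static void flush_mmu(struct gather *g)
{
        flush_tlb_only(g);
        flush_free(g);
}

int main(void)
{
        struct gather g = { 0 };
        flush_mmu(&g);
        return 0;
}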
__tlb_gather_mmu():
  303  static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
  306          tlb->mm = mm;
  307          tlb->fullmm = fullmm;
  310          tlb->need_flush_all = 0;
  311          tlb->local.next = NULL;
  312          tlb->local.nr = 0;
  313          tlb->local.max = ARRAY_SIZE(tlb->__pages);
  314          tlb->active = &tlb->local;
  315          tlb->batch_count = 0;
  317          tlb->delayed_rmap = 0;
  319          tlb_table_init(tlb);
  321          tlb->page_size = 0;
  324          __tlb_reset_range(tlb);
  325          inc_tlb_flush_pending(tlb->mm);

tlb_gather_mmu():
  336  void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
  338          __tlb_gather_mmu(tlb, mm, false);

tlb_gather_mmu_fullmm():
  352  void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
  354          __tlb_gather_mmu(tlb, mm, true);
tlb_finish_mmu():
  364  void tlb_finish_mmu(struct mmu_gather *tlb)
  378          if (mm_tlb_flush_nested(tlb->mm)) {
  387                  tlb->fullmm = 1;
  388                  __tlb_reset_range(tlb);
  389                  tlb->freed_tables = 1;
  392          tlb_flush_mmu(tlb);
  395          tlb_batch_list_free(tlb);
  397          dec_tlb_flush_pending(tlb->mm);
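
tlb_finish_mmu() closes the lifecycle opened at lines 303-325: tlb_gather_mmu() bumps a per-mm pending count (line 325), and tlb_finish_mmu() checks whether another unmap raced with this one (line 378); if so, the gathered range can no longer be trusted, so the flush is widened to the whole address space before the batches are freed and the count is dropped. A self-contained model of that pending-counter handshake; all names are illustrative, only the control flow follows the listing:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-mm counter behind inc/dec_tlb_flush_pending(). */
struct mm { atomic_int tlb_flush_pending; };

struct gather {
        struct mm *mm;
        bool fullmm;            /* flush the whole address space */
        bool freed_tables;
        long start, end;        /* gathered range, elided here */
};

static void reset_range(struct gather *g) { g->start = g->end = 0; }

/* Mirrors __tlb_gather_mmu()/tlb_gather_mmu(): init, then mark a flush pending. */
static void gather_mmu(struct gather *g, struct mm *mm)
{
        g->mm = mm;
        g->fullmm = false;
        g->freed_tables = false;
        reset_range(g);
        atomic_fetch_add(&mm->tlb_flush_pending, 1);
}

static void flush_mmu(struct gather *g)
{
        printf("flush %s\n", g->fullmm ? "entire mm" : "gathered range");
}

/* Mirrors tlb_finish_mmu(): a racing gather (pending > 1) forces a
 * conservative full flush before the pending count is dropped. */
static void finish_mmu(struct gather *g)
{
        if (atomic_load(&g->mm->tlb_flush_pending) > 1) {
                g->fullmm = true;
                reset_range(g);
                g->freed_tables = true;
        }
        flush_mmu(g);
        /* the kernel also frees the batch list here */
        atomic_fetch_sub(&g->mm->tlb_flush_pending, 1);
}

int main(void)
{
        struct mm mm = { .tlb_flush_pending = 0 };
        struct gather a, b;

        gather_mmu(&a, &mm);
        gather_mmu(&b, &mm);    /* second unmap racing on the same mm */
        finish_mmu(&a);         /* sees pending > 1 -> full flush */
        finish_mmu(&b);         /* alone now -> range flush */
        return 0;
}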