
Searched refs:batch (Results 1 – 25 of 57) sorted by relevance


/linux-3.4.99/arch/powerpc/mm/
tlb_hash64.c 45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); in hpte_need_flush() local
52 i = batch->index; in hpte_need_flush()
98 if (!batch->active) { in hpte_need_flush()
114 if (i != 0 && (mm != batch->mm || batch->psize != psize || in hpte_need_flush()
115 batch->ssize != ssize)) { in hpte_need_flush()
116 __flush_tlb_pending(batch); in hpte_need_flush()
120 batch->mm = mm; in hpte_need_flush()
121 batch->psize = psize; in hpte_need_flush()
122 batch->ssize = ssize; in hpte_need_flush()
124 batch->pte[i] = rpte; in hpte_need_flush()
[all …]
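
The hpte_need_flush() lines above show the core of the powerpc TLB batching pattern: invalidations accumulate in a per-CPU batch as long as they share the same mm, page size, and segment size, and the first mismatched entry forces the pending batch to be flushed before it is queued. Below is a minimal userspace sketch of that accumulate-or-flush logic; flush_pending() and queue_entry() are illustrative stand-ins, not the kernel's API.

#include <stdio.h>

#define BATCH_MAX 8

struct tlb_batch {
	int active;
	int index;
	void *mm;              /* address space the queued entries belong to */
	int psize, ssize;      /* page size / segment size of the entries */
	unsigned long vaddr[BATCH_MAX];
};

/* Stand-in for __flush_tlb_pending(): process and empty the batch. */
static void flush_pending(struct tlb_batch *b)
{
	printf("flushing %d entries for mm=%p\n", b->index, b->mm);
	b->index = 0;
}

/* Queue one invalidation; flush first if the entry does not match the
 * batch, since a batch may only hold one mm/psize/ssize combination. */
static void queue_entry(struct tlb_batch *b, void *mm, int psize, int ssize,
			unsigned long va)
{
	int i = b->index;

	if (i != 0 && (mm != b->mm || b->psize != psize || b->ssize != ssize)) {
		flush_pending(b);
		i = 0;
	}
	if (i == 0) {
		b->mm = mm;
		b->psize = psize;
		b->ssize = ssize;
	}
	b->vaddr[i] = va;
	b->index = i + 1;
	if (b->index == BATCH_MAX)
		flush_pending(b);
}

int main(void)
{
	struct tlb_batch b = { .active = 1 };
	int mm_a, mm_b;

	queue_entry(&b, &mm_a, 0, 0, 0x1000);
	queue_entry(&b, &mm_a, 0, 0, 0x2000);
	queue_entry(&b, &mm_b, 0, 0, 0x3000);  /* different mm: flushes first */
	flush_pending(&b);
	return 0;
}
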
hash_native_64.c 474 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); in native_flush_hash_range() local
475 unsigned long psize = batch->psize; in native_flush_hash_range()
476 int ssize = batch->ssize; in native_flush_hash_range()
482 va = batch->vaddr[i]; in native_flush_hash_range()
483 pte = batch->pte[i]; in native_flush_hash_range()
508 va = batch->vaddr[i]; in native_flush_hash_range()
509 pte = batch->pte[i]; in native_flush_hash_range()
525 va = batch->vaddr[i]; in native_flush_hash_range()
526 pte = batch->pte[i]; in native_flush_hash_range()
hugetlbpage.c 389 struct hugepd_freelist *batch = in hugepd_free_rcu_callback() local
393 for (i = 0; i < batch->index; i++) in hugepd_free_rcu_callback()
394 kmem_cache_free(hugepte_cache, batch->ptes[i]); in hugepd_free_rcu_callback()
396 free_page((unsigned long)batch); in hugepd_free_rcu_callback()
hash_utils_64.c 1182 struct ppc64_tlb_batch *batch = in flush_hash_range() local
1186 flush_hash_page(batch->vaddr[i], batch->pte[i], in flush_hash_range()
1187 batch->psize, batch->ssize, local); in flush_hash_range()
/linux-3.4.99/arch/powerpc/include/asm/
tlbflush.h 104 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
113 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); in arch_enter_lazy_mmu_mode() local
115 batch->active = 1; in arch_enter_lazy_mmu_mode()
120 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); in arch_leave_lazy_mmu_mode() local
122 if (batch->index) in arch_leave_lazy_mmu_mode()
123 __flush_tlb_pending(batch); in arch_leave_lazy_mmu_mode()
124 batch->active = 0; in arch_leave_lazy_mmu_mode()
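
The tlbflush.h lines show how that batch is bracketed: arch_enter_lazy_mmu_mode() arms the per-CPU batch, and arch_leave_lazy_mmu_mode() flushes anything still pending before disarming it. A tiny sketch of the bracketing, again with stand-in names:

#include <assert.h>

struct tlb_batch { int active; int index; };

static void flush_pending(struct tlb_batch *b) { b->index = 0; }

/* Entering lazy mode arms the batch: subsequent PTE updates queue
 * into it instead of being flushed one at a time. */
static void enter_lazy_mmu_mode(struct tlb_batch *b)
{
	b->active = 1;
}

/* Leaving lazy mode must flush whatever is still pending. */
static void leave_lazy_mmu_mode(struct tlb_batch *b)
{
	if (b->index)
		flush_pending(b);
	b->active = 0;
}

int main(void)
{
	struct tlb_batch b = { 0 };

	enter_lazy_mmu_mode(&b);
	b.index = 3;               /* pretend three updates were queued */
	leave_lazy_mmu_mode(&b);
	assert(b.index == 0 && !b.active);
	return 0;
}
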
/linux-3.4.99/arch/s390/mm/
pgtable.c 749 struct mmu_table_batch *batch; in tlb_remove_table_rcu() local
752 batch = container_of(head, struct mmu_table_batch, rcu); in tlb_remove_table_rcu()
754 for (i = 0; i < batch->nr; i++) in tlb_remove_table_rcu()
755 __tlb_remove_table(batch->tables[i]); in tlb_remove_table_rcu()
757 free_page((unsigned long)batch); in tlb_remove_table_rcu()
762 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush() local
764 if (*batch) { in tlb_table_flush()
766 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); in tlb_table_flush()
767 *batch = NULL; in tlb_table_flush()
773 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table() local
[all …]
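
The s390 pgtable.c lines show batched, RCU-deferred freeing: page-table pages collect in an mmu_table_batch, and tlb_table_flush() hands the whole batch to call_rcu_sched() so nothing is freed until every CPU has passed a quiescent state. The sketch below keeps the shape of the callback but invokes it directly, since RCU is not available in userspace; all names are stand-ins.

#include <stdio.h>
#include <stdlib.h>

#define TABLES_MAX 4

struct table_batch {
	int nr;
	void *tables[TABLES_MAX];
};

/* Stand-in for __tlb_remove_table(): release one page-table page. */
static void remove_table(void *table)
{
	printf("freeing table %p\n", table);
	free(table);
}

/* Shape of tlb_remove_table_rcu(): free every table in the batch,
 * then the batch itself.  In the kernel this runs as an RCU callback
 * after a grace period; here it is simply called directly. */
static void batch_free_callback(struct table_batch *batch)
{
	for (int i = 0; i < batch->nr; i++)
		remove_table(batch->tables[i]);
	free(batch);              /* kernel: free_page((unsigned long)batch) */
}

int main(void)
{
	struct table_batch *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->tables[b->nr++] = malloc(64);
	b->tables[b->nr++] = malloc(64);
	batch_free_callback(b);   /* kernel: call_rcu_sched(&b->rcu, ...) */
	return 0;
}
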
/linux-3.4.99/fs/xfs/
xfs_sync.c 120 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; in xfs_inode_ag_walk() local
126 (void **)batch, first_index, in xfs_inode_ag_walk()
138 struct xfs_inode *ip = batch[i]; in xfs_inode_ag_walk()
141 batch[i] = NULL; in xfs_inode_ag_walk()
166 if (!batch[i]) in xfs_inode_ag_walk()
168 error = execute(batch[i], pag, flags); in xfs_inode_ag_walk()
169 IRELE(batch[i]); in xfs_inode_ag_walk()
931 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; in xfs_reclaim_inodes_ag() local
937 (void **)batch, first_index, in xfs_reclaim_inodes_ag()
951 struct xfs_inode *ip = batch[i]; in xfs_reclaim_inodes_ag()
[all …]
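
xfs_inode_ag_walk() fills a fixed-size array per radix-tree gang lookup, prunes unwanted entries by NULLing their batch slots while still under the lock, then runs the callback on the survivors outside it. A rough userspace sketch of that two-pass batch walk; gang_lookup() here is a hypothetical stand-in for the radix-tree gang lookup.

#include <stddef.h>
#include <stdio.h>

#define LOOKUP_BATCH 32

struct inode { unsigned long ino; int skip; };

/* Hypothetical gang lookup: copy up to 'max' items starting at
 * *first_index into batch[], advance the index, return the count. */
static int gang_lookup(struct inode *pool, int pool_nr,
		       struct inode **batch, unsigned long *first_index,
		       int max)
{
	int n = 0;

	while (n < max && *first_index < (unsigned long)pool_nr)
		batch[n++] = &pool[(*first_index)++];
	return n;
}

static int execute(struct inode *ip)
{
	printf("visiting inode %lu\n", ip->ino);
	return 0;
}

int main(void)
{
	struct inode pool[5] = { {1,0}, {2,1}, {3,0}, {4,0}, {5,1} };
	struct inode *batch[LOOKUP_BATCH];
	unsigned long first_index = 0;
	int nr;

	while ((nr = gang_lookup(pool, 5, batch, &first_index,
				 LOOKUP_BATCH)) > 0) {
		/* First pass (kernel: under the RCU lock): drop entries
		 * we must not touch by NULLing their slot. */
		for (int i = 0; i < nr; i++)
			if (batch[i]->skip)
				batch[i] = NULL;
		/* Second pass (kernel: outside the lock): act on survivors. */
		for (int i = 0; i < nr; i++)
			if (batch[i])
				execute(batch[i]);
	}
	return 0;
}
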
/linux-3.4.99/mm/
memory.c 177 struct mmu_gather_batch *batch; in tlb_next_batch() local
179 batch = tlb->active; in tlb_next_batch()
180 if (batch->next) { in tlb_next_batch()
181 tlb->active = batch->next; in tlb_next_batch()
188 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); in tlb_next_batch()
189 if (!batch) in tlb_next_batch()
193 batch->next = NULL; in tlb_next_batch()
194 batch->nr = 0; in tlb_next_batch()
195 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
197 tlb->active->next = batch; in tlb_next_batch()
[all …]
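
tlb_next_batch() grows the mmu_gather by chaining fixed-size batch pages: a spare batch already on the chain is reused, otherwise a new one is allocated with GFP_NOWAIT and linked in, and allocation failure makes the caller flush early instead of growing the chain. A sketch of that shape, with calloc() standing in for the page allocation:

#include <stdlib.h>

#define MAX_GATHER_BATCH 16    /* kernel derives this from PAGE_SIZE */

struct gather_batch {
	struct gather_batch *next;
	int nr, max;
	void *pages[MAX_GATHER_BATCH];
};

struct mmu_gather {
	struct gather_batch *active;
};

/* Shape of tlb_next_batch(): reuse a spare batch if one is already
 * chained, otherwise allocate and link a new one.  Returns 0 when the
 * allocation fails, so the caller flushes early instead. */
static int next_batch(struct mmu_gather *tlb)
{
	struct gather_batch *batch = tlb->active;

	if (batch->next) {
		tlb->active = batch->next;
		return 1;
	}

	/* kernel: __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0) */
	batch = calloc(1, sizeof(*batch));
	if (!batch)
		return 0;

	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;
	tlb->active->next = batch;
	tlb->active = batch;
	return 1;
}

int main(void)
{
	struct gather_batch first = { .max = MAX_GATHER_BATCH };
	struct mmu_gather tlb = { .active = &first };
	int ok = next_batch(&tlb);

	free(first.next);
	return ok ? 0 : 1;
}
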
page_alloc.c 1117 if (pcp->count >= pcp->batch) in drain_zone_pages()
1118 to_drain = pcp->batch; in drain_zone_pages()
1287 free_pcppages_bulk(zone, pcp->batch, pcp); in free_hot_cold_page()
1288 pcp->count -= pcp->batch; in free_hot_cold_page()
1405 pcp->batch, list, in buffered_rmqueue()
2735 pageset->pcp.batch, pageset->pcp.count); in show_free_areas()
3352 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3703 int batch; in zone_batchsize() local
3711 batch = zone->present_pages / 1024; in zone_batchsize()
3712 if (batch * PAGE_SIZE > 512 * 1024) in zone_batchsize()
[all …]
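
zone_batchsize() sizes the per-CPU page batch from the zone itself: roughly 1/1024 of its pages, capped so one batch stays under 512KB. A sketch of the visible part of that heuristic (the kernel applies further rounding afterwards that the excerpt above does not show):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int batchsize(unsigned long present_pages)
{
	int batch = present_pages / 1024;   /* ~0.1% of the zone */

	if (batch * PAGE_SIZE > 512 * 1024) /* cap one batch at 512KB */
		batch = (512 * 1024) / PAGE_SIZE;
	if (batch < 1)
		batch = 1;
	return batch;
}

int main(void)
{
	/* A 4GB zone: 1048576 pages -> 1024, capped to 128 pages (512KB). */
	printf("%d\n", batchsize(1048576));
	return 0;
}
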
memcontrol.c 2293 unsigned int batch = max(CHARGE_BATCH, nr_pages); in __mem_cgroup_try_charge() local
2381 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, oom_check); in __mem_cgroup_try_charge()
2386 batch = nr_pages; in __mem_cgroup_try_charge()
2407 if (batch > nr_pages) in __mem_cgroup_try_charge()
2408 refill_stock(memcg, batch - nr_pages); in __mem_cgroup_try_charge()
2904 struct memcg_batch_info *batch = NULL; in mem_cgroup_do_uncharge() local
2911 batch = &current->memcg_batch; in mem_cgroup_do_uncharge()
2917 if (!batch->memcg) in mem_cgroup_do_uncharge()
2918 batch->memcg = memcg; in mem_cgroup_do_uncharge()
2927 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE)) in mem_cgroup_do_uncharge()
[all …]
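
__mem_cgroup_try_charge() charges at least CHARGE_BATCH pages even when fewer are needed and parks the surplus in a per-CPU stock via refill_stock(), so later charges can avoid the shared counter. A simplified single-counter sketch of that charge-and-refund shape; the globals are stand-ins for the kernel's res_counter and per-CPU stock:

#include <stdio.h>

#define CHARGE_BATCH 32UL

static unsigned long counter;   /* stand-in for the shared res_counter */
static unsigned long stock;     /* stand-in for the per-CPU stock */

static int do_charge(unsigned long nr) { counter += nr; return 0; }
static void refill_stock(unsigned long nr) { stock += nr; }

/* Charge a whole batch even when fewer pages are needed, and bank the
 * surplus locally so later charges skip the shared counter. */
static int try_charge(unsigned long nr_pages)
{
	unsigned long batch = nr_pages > CHARGE_BATCH ? nr_pages : CHARGE_BATCH;

	if (do_charge(batch))
		return -1;
	if (batch > nr_pages)
		refill_stock(batch - nr_pages);
	return 0;
}

int main(void)
{
	try_charge(1);   /* charges 32, banks 31 in the stock */
	printf("counter=%lu stock=%lu\n", counter, stock);
	return 0;
}
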
/linux-3.4.99/fs/
aio.c 449 static void kiocb_batch_init(struct kiocb_batch *batch, long total) in kiocb_batch_init() argument
451 INIT_LIST_HEAD(&batch->head); in kiocb_batch_init()
452 batch->count = total; in kiocb_batch_init()
455 static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch) in kiocb_batch_free() argument
459 if (list_empty(&batch->head)) in kiocb_batch_free()
463 list_for_each_entry_safe(req, n, &batch->head, ki_batch) { in kiocb_batch_free()
478 static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch) in kiocb_batch_refill() argument
486 to_alloc = min(batch->count, KIOCB_BATCH_SIZE); in kiocb_batch_refill()
492 list_add(&req->ki_batch, &batch->head); in kiocb_batch_refill()
522 list_for_each_entry_safe(req, n, &batch->head, ki_batch) { in kiocb_batch_refill()
[all …]
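
The aio batch helpers pre-allocate a batch of request structures onto a list in one refill, so the submission loop does not hit the allocator per request, and free whatever is left over afterwards. A sketch of init/refill/free using a plain singly linked list in place of the kernel's list_head:

#include <stdlib.h>

#define KIOCB_BATCH_SIZE 32L

struct req { struct req *next; };

struct req_batch {
	struct req *head;
	long count;               /* requests still wanted overall */
};

static void batch_init(struct req_batch *b, long total)
{
	b->head = NULL;
	b->count = total;
}

/* Shape of kiocb_batch_refill(): allocate up to a batch worth of
 * requests in one go and stash them on the batch list. */
static long batch_refill(struct req_batch *b)
{
	long to_alloc = b->count < KIOCB_BATCH_SIZE ? b->count : KIOCB_BATCH_SIZE;
	long allocated = 0;

	for (long i = 0; i < to_alloc; i++) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			break;
		r->next = b->head;
		b->head = r;
		allocated++;
	}
	return allocated;
}

/* Shape of kiocb_batch_free(): release anything left on the list. */
static void batch_free(struct req_batch *b)
{
	while (b->head) {
		struct req *r = b->head;

		b->head = r->next;
		free(r);
	}
}

int main(void)
{
	struct req_batch b;

	batch_init(&b, 100);
	batch_refill(&b);
	batch_free(&b);
	return 0;
}
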
/linux-3.4.99/drivers/gpu/drm/i915/
i915_dma.c 453 drm_i915_batchbuffer_t * batch, in i915_dispatch_batchbuffer() argument
457 int nbox = batch->num_cliprects; in i915_dispatch_batchbuffer()
460 if ((batch->start | batch->used) & 0x7) { in i915_dispatch_batchbuffer()
471 batch->DR1, batch->DR4); in i915_dispatch_batchbuffer()
483 OUT_RING(batch->start); in i915_dispatch_batchbuffer()
486 OUT_RING(batch->start | MI_BATCH_NON_SECURE); in i915_dispatch_batchbuffer()
494 OUT_RING(batch->start | MI_BATCH_NON_SECURE); in i915_dispatch_batchbuffer()
495 OUT_RING(batch->start + batch->used - 4); in i915_dispatch_batchbuffer()
597 drm_i915_batchbuffer_t *batch = data; in i915_batchbuffer() local
607 batch->start, batch->used, batch->num_cliprects); in i915_batchbuffer()
[all …]
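
i915_dispatch_batchbuffer() rejects a batch buffer unless both its start and its length are 8-byte aligned, and ORing the two values lets a single mask test check both at once. A small sketch of that trick:

#include <assert.h>

/* (start | used) & 0x7 tests both values' low bits at once: the OR is
 * only 8-byte aligned if start and used individually are. */
static int batch_aligned(unsigned long start, unsigned long used)
{
	return ((start | used) & 0x7) == 0;
}

int main(void)
{
	assert(batch_aligned(0x1000, 64));
	assert(!batch_aligned(0x1004, 64));   /* misaligned start */
	assert(!batch_aligned(0x1000, 60));   /* misaligned length */
	return 0;
}
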
/linux-3.4.99/lib/
percpu_counter.c 74 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) in __percpu_counter_add() argument
80 if (count >= batch || count <= -batch) { in __percpu_counter_add()
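
__percpu_counter_add() is the canonical batch threshold: each CPU accumulates a local delta and only folds it into the shared (lock-protected) total once the delta crosses ±batch. A single-threaded sketch of that fold-on-threshold behaviour, with one local counter standing in for the per-CPU ones:

#include <stdio.h>

struct pc {
	long long count;   /* shared, "expensive" total */
	long local;        /* per-CPU delta (one CPU shown for simplicity) */
};

/* Accumulate locally; only touch the shared total once the local
 * delta reaches +/-batch. */
static void pc_add(struct pc *c, long amount, long batch)
{
	long count = c->local + amount;

	if (count >= batch || count <= -batch) {
		c->count += count;   /* kernel: done under fbc->lock */
		c->local = 0;
	} else {
		c->local = count;
	}
}

int main(void)
{
	struct pc c = { 0, 0 };

	for (int i = 0; i < 100; i++)
		pc_add(&c, 1, 32);
	printf("shared=%lld local=%ld\n", c.count, c.local);  /* 96 and 4 */
	return 0;
}
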
/linux-3.4.99/arch/s390/include/asm/
tlb.h 33 struct mmu_table_batch *batch; member
55 tlb->batch = NULL; in tlb_gather_mmu()
/linux-3.4.99/arch/powerpc/platforms/pseries/
lpar.c 393 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); in pSeries_lpar_flush_hash_range() local
404 psize = batch->psize; in pSeries_lpar_flush_hash_range()
405 ssize = batch->ssize; in pSeries_lpar_flush_hash_range()
408 va = batch->vaddr[i]; in pSeries_lpar_flush_hash_range()
409 pte = batch->pte[i]; in pSeries_lpar_flush_hash_range()
/linux-3.4.99/include/linux/
percpu_counter.h 41 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
126 __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) in __percpu_counter_add() argument
shrinker.h 34 long batch; /* reclaim batch size, 0 = default */ member
/linux-3.4.99/tools/vm/
page-types.c 646 unsigned long batch; in walk_pfn() local
651 batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); in walk_pfn()
652 pages = kpageflags_read(buf, index, batch); in walk_pfn()
668 unsigned long batch; in walk_vma() local
674 batch = min_t(unsigned long, count, PAGEMAP_BATCH); in walk_vma()
675 pages = pagemap_read(buf, index, batch); in walk_vma()
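
walk_pfn() and walk_vma() cover arbitrarily large page ranges with a fixed-size buffer by clamping each read to a batch: batch = min(count, BATCH), read, advance, repeat. A sketch of that chunking loop; read_chunk() is a hypothetical stand-in for kpageflags_read()/pagemap_read():

#include <stdio.h>

#define FLAGS_BATCH 1024UL

/* Hypothetical read: pretend to fetch 'batch' records starting at
 * 'index' and report how many were actually read. */
static unsigned long read_chunk(unsigned long index, unsigned long batch)
{
	(void)index;
	return batch;
}

/* Walk an arbitrarily large range with a fixed-size buffer by
 * clamping each read to the batch size. */
static void walk(unsigned long index, unsigned long count)
{
	while (count) {
		unsigned long batch = count < FLAGS_BATCH ? count : FLAGS_BATCH;
		unsigned long pages = read_chunk(index, batch);

		if (!pages)
			break;
		index += pages;
		count -= pages;
	}
}

int main(void)
{
	walk(0, 5000);   /* five reads: 4 x 1024 + 1 x 904 */
	return 0;
}
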
/linux-3.4.99/arch/powerpc/kernel/
process.c 405 struct ppc64_tlb_batch *batch; in __switch_to() local
506 batch = &__get_cpu_var(ppc64_tlb_batch); in __switch_to()
507 if (batch->active) { in __switch_to()
509 if (batch->index) in __switch_to()
510 __flush_tlb_pending(batch); in __switch_to()
511 batch->active = 0; in __switch_to()
531 batch = &__get_cpu_var(ppc64_tlb_batch); in __switch_to()
532 batch->active = 1; in __switch_to()
/linux-3.4.99/include/asm-generic/
tlb.h 95 struct mmu_table_batch *batch; member
/linux-3.4.99/Documentation/cgroups/
freezer-subsystem.txt 1 The cgroup freezer is useful to batch job management system which start
6 be started/stopped by the batch job management system. It also provides
cpuacct.txt 49 due to the batch processing nature of percpu_counter.
/linux-3.4.99/block/
Kconfig.iosched 21 a new point in the service tree and doing a batch of IO from there
/linux-3.4.99/drivers/target/iscsi/
iscsi_target_erl1.c 1079 int batch = 0; in iscsit_handle_ooo_cmdsn() local
1087 batch = 1; in iscsit_handle_ooo_cmdsn()
1092 batch = 1; in iscsit_handle_ooo_cmdsn()
1100 ooo_cmdsn->batch_count = (batch) ? in iscsit_handle_ooo_cmdsn()
/linux-3.4.99/Documentation/trace/
events-kmem.txt 60 When pages are freed in batch, the also mm_page_free_batched is triggered.
62 freed in batch with a page list. Significant amounts of activity here could
