/linux-6.1.9/mm/
mmu_gather.c
  in tlb_next_batch():
      20  struct mmu_gather_batch *batch;    (local)
      22  batch = tlb->active;
      23  if (batch->next) {
      24  tlb->active = batch->next;
      31  batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
      32  if (!batch)
      36  batch->next = NULL;
      37  batch->nr = 0;
      38  batch->max = MAX_GATHER_BATCH;
      40  tlb->active->next = batch;
  [all …]
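tlb_next_batch() grows a chain of gather batches: if a spare batch is already linked it is reused, otherwise a fresh one is allocated with GFP_NOWAIT and linked in, and failure tells the caller to flush early. A minimal userspace sketch of that pattern follows; the names are hypothetical and malloc stands in for __get_free_pages().

    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_GATHER_BATCH 32    /* illustrative; the kernel derives it from PAGE_SIZE */

    struct gather_batch {
        struct gather_batch *next;
        unsigned int nr;
        unsigned int max;
        void *pages[MAX_GATHER_BATCH];
    };

    struct gather {
        struct gather_batch *active;
    };

    /* Mirrors tlb_next_batch(): reuse an already-chained spare batch if
     * there is one, otherwise allocate and link a fresh batch. Returning
     * false tells the caller "no memory, flush what you have now". */
    static bool next_batch(struct gather *g)
    {
        struct gather_batch *batch = g->active;

        if (batch->next) {
            g->active = batch->next;
            return true;
        }

        batch = malloc(sizeof(*batch));    /* kernel: __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0) */
        if (!batch)
            return false;

        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        g->active->next = batch;
        g->active = batch;
        return true;
    }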
/linux-6.1.9/arch/powerpc/mm/book3s64/
hash_tlb.c
  in hpte_need_flush():
      44  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);    (local)
      51  i = batch->index;
     103  if (!batch->active) {
     119  if (i != 0 && (mm != batch->mm || batch->psize != psize ||
     120          batch->ssize != ssize)) {
     121  __flush_tlb_pending(batch);
     125  batch->mm = mm;
     126  batch->psize = psize;
     127  batch->ssize = ssize;
     129  batch->pte[i] = rpte;
  [all …]
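hpte_need_flush() batches hash-PTE invalidations per CPU, but only while they share one (mm, psize, ssize) key; any mismatch drains the pending batch before the new entry is queued. A hedged sketch of that flush-on-mismatch rule, with simplified types:

    /* Types and the 192-entry size are simplifications of ppc64_tlb_batch. */
    struct tlb_flush_batch {
        void *mm;
        int psize;
        int ssize;
        unsigned int index;
        unsigned long pte[192];
    };

    static void flush_pending(struct tlb_flush_batch *b)
    {
        /* ... issue one tlbie sequence covering b->pte[0..index) ... */
        b->index = 0;
    }

    static void batch_add(struct tlb_flush_batch *b, void *mm, int psize,
                          int ssize, unsigned long rpte)
    {
        unsigned int i = b->index;

        if (i != 0 && (mm != b->mm || b->psize != psize || b->ssize != ssize)) {
            flush_pending(b);    /* key changed: drain first */
            i = 0;
        }
        if (i == 0) {            /* first entry sets the batch key */
            b->mm = mm;
            b->psize = psize;
            b->ssize = ssize;
        }
        b->pte[i] = rpte;
        b->index = i + 1;
    }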
/linux-6.1.9/drivers/gpu/drm/i915/selftests/
igt_spinner.c
  in igt_spinner_pin():
      97  if (!spin->batch) {
     105  spin->batch = vaddr;
  in igt_spinner_create_request():
     147  u32 *batch;    (local)
     155  if (!spin->batch) {
     176  batch = spin->batch;
     179  *batch++ = MI_STORE_DWORD_IMM_GEN4;
     180  *batch++ = lower_32_bits(hws_address(hws, rq));
     181  *batch++ = upper_32_bits(hws_address(hws, rq));
     183  *batch++ = MI_STORE_DWORD_IMM_GEN4;
     184  *batch++ = 0;
  [all …]
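The spinner builds its batch buffer by writing GPU commands through a moving u32 pointer: each *batch++ appends one dword, and a 64-bit address is split with lower_32_bits()/upper_32_bits(). A toy encoder in the same style; the opcode values below are placeholders, not real GEN encodings:

    #include <stdint.h>

    /* Placeholder opcodes; the real MI_* encodings live in intel_gpu_commands.h. */
    #define MI_STORE_DWORD_IMM  0x20000000u
    #define MI_NOOP             0x00000000u

    static inline uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
    static inline uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

    /* One "store immediate dword at addr" packet, written through a moving
     * pointer exactly like the *batch++ lines in igt_spinner_create_request();
     * the advanced pointer is returned so packets chain naturally. */
    static uint32_t *emit_store_dw(uint32_t *batch, uint64_t addr, uint32_t value)
    {
        *batch++ = MI_STORE_DWORD_IMM;
        *batch++ = lower_32_bits(addr);
        *batch++ = upper_32_bits(addr);
        *batch++ = value;
        return batch;
    }

    /* Example: write a seqno breadcrumb, then pad with a no-op. */
    static uint32_t *build_batch(uint32_t *cs, uint64_t hws_addr, uint32_t seqno)
    {
        cs = emit_store_dw(cs, hws_addr, seqno);
        *cs++ = MI_NOOP;
        return cs;
    }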
i915_request.c
  in empty_request():
     990  struct i915_vma *batch)    (argument)
    1000  batch->node.start,
    1001  batch->node.size,
  in live_empty_request():
    1017  struct i915_vma *batch;    (local)
    1026  batch = empty_batch(i915);
    1027  if (IS_ERR(batch))
    1028  return PTR_ERR(batch);
    1043  request = empty_request(engine, batch);
    1056  request = empty_request(engine, batch);
    1086  i915_vma_unpin(batch);
  [all …]
/linux-6.1.9/drivers/gpu/drm/i915/gt/
gen7_renderclear.c
  in gen7_emit_state_base_address():
     235  gen7_emit_state_base_address(struct batch_chunk *batch,    (argument)
     238  u32 *cs = batch_alloc_items(batch, 0, 10);
     242  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
     244  *cs++ = (batch_addr(batch) + surface_state_base) | BASE_ADDRESS_MODIFY;
     246  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
     248  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
     250  *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
     257  batch_advance(batch, cs);
  in gen7_emit_vfe_state():
     261  gen7_emit_vfe_state(struct batch_chunk *batch,    (argument)
     267  u32 *cs = batch_alloc_items(batch, 32, 8);
  [all …]
intel_lrc.c
  in gen8_emit_flush_coherentl3_wa():
    1510  gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)    (argument)
    1513  *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
    1514  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
    1515  *batch++ = intel_gt_scratch_offset(engine->gt,
    1517  *batch++ = 0;
    1519  *batch++ = MI_LOAD_REGISTER_IMM(1);
    1520  *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
    1521  *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
    1523  batch = gen8_emit_pipe_control(batch,
    1528  *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
  [all …]
gen8_engine_cs.h
  in __gen8_emit_pipe_control():
      52  __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)    (argument)
      54  memset(batch, 0, 6 * sizeof(u32));
      56  batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0;
      57  batch[1] = flags1;
      58  batch[2] = offset;
      60  return batch + 6;
  in gen8_emit_pipe_control():
      63  static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)    (argument)
      65  return __gen8_emit_pipe_control(batch, 0, flags, offset);
  in gen12_emit_pipe_control():
      68  static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)    (argument)
      70  return __gen8_emit_pipe_control(batch, flags0, flags1, offset);
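This header is shown complete: __gen8_emit_pipe_control() zeroes a fixed six-dword packet, fills the header, flags, and offset, and returns the pointer advanced past the packet, so calls chain. A self-contained usage sketch; the opcode and flag encodings below are assumptions modeled on i915's definitions, not copied from them:

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t u32;

    #define GFX_OP_PIPE_CONTROL(len)  (0x7a000000u | ((len) - 2))  /* assumed encoding */
    #define PIPE_CONTROL_CS_STALL     (1u << 20)                   /* assumed bit */

    /* Same shape as the header's gen8 wrapper (flags0 fixed to 0). */
    static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
    {
        memset(batch, 0, 6 * sizeof(u32));
        batch[0] = GFX_OP_PIPE_CONTROL(6);
        batch[1] = flags;
        batch[2] = offset;
        return batch + 6;    /* caller keeps emitting at the returned tail */
    }

    /* Usage: chain two packets; each consumes exactly six dwords. */
    u32 *emit_flush_sequence(u32 *cs, u32 scratch_offset)
    {
        cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, scratch_offset);
        cs = gen8_emit_pipe_control(cs, 0, scratch_offset);
        return cs;
    }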
selftest_hangcheck.c
      38  u32 *batch;    (member)
  in hang_init():
      81  h->batch = vaddr;
  in hang_create_request():
     128  u32 *batch;    (local)
     148  h->batch = vaddr;
     186  batch = h->batch;
     188  *batch++ = MI_STORE_DWORD_IMM_GEN4;
     189  *batch++ = lower_32_bits(hws_address(hws, rq));
     190  *batch++ = upper_32_bits(hws_address(hws, rq));
     191  *batch++ = rq->fence.seqno;
     192  *batch++ = MI_NOOP;
  [all …]
selftest_workarounds.c
  in check_dirty_whitelist():
     508  struct i915_vma *batch;    (local)
     517  batch = create_batch(ce->vm);
     518  if (IS_ERR(batch)) {
     519  err = PTR_ERR(batch);
     546  err = i915_gem_object_lock(batch->obj, &ww);
     552  cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
     618  i915_gem_object_flush_map(batch->obj);
     619  i915_gem_object_unpin_map(batch->obj);
     635  err = i915_request_await_object(rq, batch->obj, false);
     637  err = i915_vma_move_to_active(batch, rq, 0);
  [all …]
selftest_lrc.c
  in store_context():
     972  struct i915_vma *batch;    (local)
     976  batch = create_user_vma(ce->vm, SZ_64K);
     977  if (IS_ERR(batch))
     978  return batch;
     980  cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
     982  i915_vma_put(batch);
     988  i915_gem_object_unpin_map(batch->obj);
     989  i915_vma_put(batch);
    1050  i915_gem_object_flush_map(batch->obj);
    1051  i915_gem_object_unpin_map(batch->obj);
  [all …]
selftest_engine_cs.c
  in perf_mi_bb_start():
     134  struct i915_vma *batch;    (local)
     140  batch = create_empty_batch(ce);
     141  if (IS_ERR(batch)) {
     142  err = PTR_ERR(batch);
     147  err = i915_vma_sync(batch);
     150  i915_vma_put(batch);
     168  batch->node.start, 8,
     189  i915_vma_put(batch);
/linux-6.1.9/arch/powerpc/include/asm/book3s/64/
tlbflush-hash.h
      25  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
  in arch_enter_lazy_mmu_mode():
      31  struct ppc64_tlb_batch *batch;    (local)
      40  batch = this_cpu_ptr(&ppc64_tlb_batch);
      41  batch->active = 1;
  in arch_leave_lazy_mmu_mode():
      46  struct ppc64_tlb_batch *batch;    (local)
      50  batch = this_cpu_ptr(&ppc64_tlb_batch);
      52  if (batch->index)
      53  __flush_tlb_pending(batch);
      54  batch->active = 0;
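These helpers bracket lazy MMU mode: entering marks the per-CPU batch active so PTE flushes queue up, and leaving drains anything pending and clears the flag. A userspace sketch of the same protocol, with thread-local storage standing in for per-CPU data:

    #include <stdio.h>

    struct tlb_batch {
        int active;
        unsigned int index;    /* queued invalidations */
    };

    /* Thread-local stand-in for the kernel's per-CPU ppc64_tlb_batch. */
    static _Thread_local struct tlb_batch this_cpu_batch;

    static void flush_tlb_pending(struct tlb_batch *batch)
    {
        printf("flushing %u queued invalidations\n", batch->index);
        batch->index = 0;
    }

    /* arch_enter_lazy_mmu_mode(): from here on, PTE flushes queue up. */
    void enter_lazy_mmu_mode(void)
    {
        this_cpu_batch.active = 1;
    }

    /* arch_leave_lazy_mmu_mode(): drain anything pending, then go eager. */
    void leave_lazy_mmu_mode(void)
    {
        struct tlb_batch *batch = &this_cpu_batch;

        if (batch->index)
            flush_tlb_pending(batch);
        batch->active = 0;
    }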
/linux-6.1.9/drivers/gpu/drm/i915/gem/selftests/
igt_gem_utils.c
  in igt_gpu_fill_dw():
     116  struct i915_vma *batch;    (local)
     123  batch = igt_emit_store_dw(vma, offset, count, val);
     124  if (IS_ERR(batch))
     125  return PTR_ERR(batch);
     133  i915_vma_lock(batch);
     134  err = i915_request_await_object(rq, batch->obj, false);
     136  err = i915_vma_move_to_active(batch, rq, 0);
     137  i915_vma_unlock(batch);
     154  batch->node.start, batch->node.size,
     162  i915_vma_unpin_and_release(&batch, 0);
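igt_gpu_fill_dw() shows the usual i915 request/batch ordering: lock the batch object, order the new request after existing users (i915_request_await_object), publish the vma as active for the request's lifetime (i915_vma_move_to_active), unlock, and only then emit the batch start. A loose, fully hypothetical miniature of that protocol, with a mutex and a last-writer pointer standing in for the real dependency tracking:

    #include <pthread.h>

    struct request {
        struct request *depends_on;
    };

    struct buffer {
        pthread_mutex_t lock;
        struct request *last_writer;    /* whom new users must wait for */
    };

    static int request_await(struct request *rq, struct buffer *buf)
    {
        rq->depends_on = buf->last_writer;    /* i915_request_await_object() */
        return 0;
    }

    static void move_to_active(struct buffer *buf, struct request *rq, int write)
    {
        if (write)
            buf->last_writer = rq;            /* i915_vma_move_to_active() */
    }

    static int attach_batch(struct buffer *batch, struct request *rq)
    {
        int err;

        pthread_mutex_lock(&batch->lock);     /* i915_vma_lock() */
        err = request_await(rq, batch);
        if (!err)
            move_to_active(batch, rq, 0);     /* batch is read-only for the GPU */
        pthread_mutex_unlock(&batch->lock);   /* i915_vma_unlock() */
        return err;
    }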
i915_gem_client_blt.c
     103  struct i915_vma *batch;    (member)
  in prepare_blit():
     143  struct drm_i915_gem_object *batch)    (argument)
     145  const int ver = GRAPHICS_VER(to_i915(batch->base.dev));
     150  cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
     254  i915_gem_object_flush_map(batch);
     255  i915_gem_object_unpin_map(batch);
  in tiled_blits_destroy_buffers():
     268  i915_vma_put(t->batch);
  in tiled_blits_create_buffers():
     307  t->batch = __create_vma(t, PAGE_SIZE, false);
     308  if (IS_ERR(t->batch))
     309  return PTR_ERR(t->batch);
  [all …]
/linux-6.1.9/drivers/gpu/drm/vmwgfx/
vmwgfx_mob.c
  in vmw_otable_batch_setup():
     237  struct vmw_otable_batch *batch)    (argument)
     241  struct vmw_otable *otables = batch->otables;
     246  for (i = 0; i < batch->num_otables; ++i) {
     254  ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
     259  for (i = 0; i < batch->num_otables; ++i) {
     260  if (!batch->otables[i].enabled)
     263  ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
     274  for (i = 0; i < batch->num_otables; ++i) {
     275  if (batch->otables[i].enabled)
     277          &batch->otables[i]);
  [all …]
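vmw_otable_batch_setup() is a textbook setup-with-unwind loop: enable each table in order and, on failure, take down only the tables that were actually brought up. A sketch of the unwind shape with stub setup/takedown helpers:

    #include <stdio.h>

    struct otable { int enabled; };

    /* Toy stand-ins for vmw_setup_otable_base() / vmw_takedown_otable_base(). */
    static int  setup_table(struct otable *t)    { (void)t; return 0; }
    static void takedown_table(struct otable *t) { (void)t; puts("takedown"); }

    static int batch_setup(struct otable *tables, unsigned int num)
    {
        unsigned int i;
        int ret = 0;

        for (i = 0; i < num; ++i) {
            if (!tables[i].enabled)
                continue;
            ret = setup_table(&tables[i]);
            if (ret)
                goto out_no_setup;
        }
        return 0;

    out_no_setup:
        while (i--) {    /* walk back over what was brought up */
            if (tables[i].enabled)
                takedown_table(&tables[i]);
        }
        return ret;
    }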
/linux-6.1.9/drivers/xen/
gntdev.c
  in gntdev_get_page():
     799  static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,    (argument)
     807  ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
     811  batch->pages[batch->nr_pages++] = page;
  in gntdev_put_pages():
     819  static void gntdev_put_pages(struct gntdev_copy_batch *batch)    (argument)
     821  unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
     822  batch->nr_pages = 0;
     823  batch->writeable = false;
  in gntdev_copy():
     826  static int gntdev_copy(struct gntdev_copy_batch *batch)    (argument)
     830  gnttab_batch_copy(batch->ops, batch->nr_ops);
     831  gntdev_put_pages(batch);
  [all …]
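gntdev batches grant copies against pinned user pages: gntdev_get_page() pins one page (FOLL_WRITE when the batch is writeable) into the batch array, gntdev_copy() submits all accumulated ops in one call, and gntdev_put_pages() drops every pin, dirtying pages that were written. A hedged sketch of that pin/submit/unpin lifecycle with toy stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_PAGES 16

    struct copy_batch {
        void *pages[BATCH_PAGES];    /* kernel: struct page * from pin_user_pages_fast() */
        unsigned int nr_pages;
        bool writeable;
    };

    /* Toy stand-ins for page pinning and the grant-copy hypercall. */
    static void *pin_page(void *virt, bool writeable) { (void)writeable; return virt; }
    static void unpin_page(void *page, bool dirty)    { (void)page; (void)dirty; }
    static void submit_ops(struct copy_batch *b)      { printf("%u pages\n", b->nr_pages); }

    /* gntdev_get_page(): pin one user page and stash it in the batch. */
    static int batch_get_page(struct copy_batch *b, void *virt)
    {
        void *page = pin_page(virt, b->writeable);

        if (!page || b->nr_pages == BATCH_PAGES)
            return -1;
        b->pages[b->nr_pages++] = page;
        return 0;
    }

    /* gntdev_put_pages(): drop every pin, dirtying pages that were written. */
    static void batch_put_pages(struct copy_batch *b)
    {
        while (b->nr_pages)
            unpin_page(b->pages[--b->nr_pages], b->writeable);
        b->writeable = false;
    }

    /* gntdev_copy(): one submission for the whole batch, then release pins. */
    static void batch_copy(struct copy_batch *b)
    {
        submit_ops(b);
        batch_put_pages(b);
    }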
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
  in mlx5e_xsk_alloc_rx_mpwqe():
      17  int batch, i;    (local)
      25  batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
      34  for (; batch < rq->mpwqe.pages_per_wqe; batch++) {
      35  wi->alloc_units[batch].xsk = xsk_buff_alloc(rq->xsk_pool);
      36  if (unlikely(!wi->alloc_units[batch].xsk))
      45  for (i = 0; i < batch; i++) {
      53  for (i = 0; i < batch; i++) {
      64  for (i = 0; i < batch; i++) {
      89  for (i = 0; i < batch; i++) {
     134  while (--batch >= 0)
  [all …]
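mlx5e_xsk_alloc_rx_mpwqe() allocates in two steps: one bulk call that may return fewer buffers than asked for, then a top-up loop for the remainder, unwinding everything if the top-up fails partway (the `while (--batch >= 0)` at line 134). The same shape in a self-contained sketch with a toy pool:

    #include <stdlib.h>

    /* Toy pool standing in for xsk_buff_alloc_batch()/xsk_buff_alloc(); like
     * the real batch call, it may return fewer buffers than requested. */
    static int pool_alloc_batch(void **bufs, int want)
    {
        int n = want / 2;    /* pretend the fill ring is half empty */

        for (int i = 0; i < n; i++)
            bufs[i] = malloc(64);
        return n;
    }

    static void *pool_alloc_one(void) { return malloc(64); }
    static void pool_free(void *buf)  { free(buf); }

    /* Bulk-allocate what the pool will give in one call, top up the rest
     * one buffer at a time, and unwind everything on failure. */
    static int alloc_rx_buffers(void **units, int pages_per_wqe)
    {
        int batch = pool_alloc_batch(units, pages_per_wqe);

        for (; batch < pages_per_wqe; batch++) {
            units[batch] = pool_alloc_one();
            if (!units[batch])
                goto err_unwind;
        }
        return 0;

    err_unwind:
        while (--batch >= 0)
            pool_free(units[batch]);
        return -1;
    }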
/linux-6.1.9/tools/testing/selftests/bpf/map_tests/
htab_map_batch_ops.c
  in __test_map_lookup_and_delete_batch():
      79  __u32 batch, count, total, total_success;    (local)
     109  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
     119  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
     127  err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &batch, keys,
     153          total ? &batch : NULL,
     154          &batch, keys + total,
     216          total ? &batch : NULL,
     217          &batch, keys + total,
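The selftest drives the real libbpf batch API with a cursor: in_batch is NULL on the first call (hence `total ? &batch : NULL`), the returned out_batch token is fed back in, and ENOENT marks the end of the scan. A usage sketch under those conventions; the int key/value layout is an assumption carried over from the selftest:

    #include <errno.h>
    #include <bpf/bpf.h>

    /* Drain a BPF hash map in chunks. Caller supplies map_fd plus keys[]
     * and values[] arrays with room for max_entries ints each. Returns the
     * number of entries drained, or -1 on a real failure. */
    static int drain_map(int map_fd, int *keys, int *values, __u32 max_entries)
    {
        __u32 batch, count, total = 0;
        int err;

        do {
            count = max_entries - total;
            err = bpf_map_lookup_and_delete_batch(map_fd,
                                                  total ? &batch : NULL,
                                                  &batch,
                                                  keys + total,
                                                  values + total,
                                                  &count, NULL);
            total += count;
        } while (!err && total < max_entries);

        if (err && errno != ENOENT)    /* ENOENT: the scan ran off the end */
            return -1;
        return (int)total;
    }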
/linux-6.1.9/tools/virtio/
virtio_test.c
  in run_test():
     170  bool delayed, int batch, int reset_n, int bufs)    (argument)
     178  const bool random_batch = batch == RANDOM_BATCH;
     193  batch = (random() % vq->vring.num) + 1;
     196  (started - completed) < batch) {
  in main():
     347  long batch = 1, reset = 0;    (local)
     376  batch = RANDOM_BATCH;
     378  batch = strtol(optarg, NULL, 10);
     379  assert(batch > 0);
     380  assert(batch < (long)INT_MAX + 1);
     401  run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
/linux-6.1.9/drivers/net/ethernet/netronome/nfp/flower/
lag_conf.c
  in nfp_fl_lag_config_group():
     204  unsigned int member_cnt, enum nfp_fl_lag_batch *batch)    (argument)
     224  if (*batch == NFP_FL_LAG_BATCH_FIRST) {
     227  *batch = NFP_FL_LAG_BATCH_MEMBER;
     233  *batch = NFP_FL_LAG_BATCH_FINISHED;
     239  if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
  in nfp_fl_lag_do_work():
     266  enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;    (local)
     288          &batch);
     357          active_count, &batch);
     371  if (batch == NFP_FL_LAG_BATCH_MEMBER) {
     372  batch = NFP_FL_LAG_BATCH_FINISHED;
  [all …]
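The LAG code threads a three-state enum through successive group writes: the first message of a cycle is tagged FIRST and opens the batch, later ones reuse it, and the worker closes the cycle with a FINISHED marker, sending an empty closing message if nothing else did. A hypothetical sketch of that state machine:

    #include <stdio.h>

    enum lag_batch {
        LAG_BATCH_FIRST,       /* nothing sent yet in this cycle */
        LAG_BATCH_MEMBER,      /* at least one group message sent */
        LAG_BATCH_FINISHED,    /* closing marker sent */
    };

    /* Hypothetical stand-in for the firmware message send. */
    static void send_group_msg(int group_id, int first, int last)
    {
        printf("group %d first=%d last=%d\n", group_id, first, last);
    }

    static void config_group(int group_id, int is_last, enum lag_batch *batch)
    {
        int first = (*batch == LAG_BATCH_FIRST);

        if (first)
            *batch = LAG_BATCH_MEMBER;      /* the batch is now open */
        if (is_last)
            *batch = LAG_BATCH_FINISHED;    /* this message also closes it */

        send_group_msg(group_id, first, is_last);
    }

    int main(void)
    {
        enum lag_batch batch = LAG_BATCH_FIRST;

        for (int id = 0; id < 3; id++)
            config_group(id, 0, &batch);

        /* As in nfp_fl_lag_do_work(): a batch opened but never closed
         * gets an empty message flagged as the last one. */
        if (batch == LAG_BATCH_MEMBER) {
            batch = LAG_BATCH_FINISHED;
            send_group_msg(-1, 0, 1);
        }
        return 0;
    }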
/linux-6.1.9/tools/virtio/ringtest/
main.c
      22  int batch = 1;    (variable)
  in run_guest():
     116  int tokick = batch;
     129  tokick = batch;
  in main():
     348  batch = c;
     372  if (batch > max_outstanding)
     373  batch = max_outstanding;
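run_guest() kicks the host once per batch rather than once per buffer: tokick counts down from batch and resets after each notification, and main() clamps batch to max_outstanding. The countdown in miniature:

    #include <stdio.h>

    /* Toy doorbell and submit, standing in for the ringtest's primitives. */
    static void kick(void)    { puts("kick"); }
    static int  add_buf(void) { return 0; }

    /* Notify once per 'batch' submissions instead of once per buffer. */
    static void submit_loop(int batch, int nbufs)
    {
        int tokick = batch;

        for (int i = 0; i < nbufs; i++) {
            if (add_buf())
                break;
            if (--tokick == 0) {    /* a full batch is queued: notify */
                kick();
                tokick = batch;
            }
        }
        kick();                     /* flush a trailing partial batch */
    }

    int main(void)
    {
        int batch = 4, max_outstanding = 3;

        if (batch > max_outstanding)    /* main() clamps batch the same way */
            batch = max_outstanding;
        submit_loop(batch, 10);
        return 0;
    }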
/linux-6.1.9/net/core/
netclassid_cgroup.c
      64  unsigned int batch;    (member)
  in update_classid_sock():
      76  if (--ctx->batch == 0) {
      77  ctx->batch = UPDATE_CLASSID_BATCH;
  in update_classid_task():
      87  .batch = UPDATE_CLASSID_BATCH
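update_classid_sock() walks a cgroup's sockets but yields every UPDATE_CLASSID_BATCH entries: the countdown resets and the iterator pauses so a large socket table cannot monopolize the CPU. A small sketch of the yield rule; the batch value below is illustrative, the kernel's is the define in netclassid_cgroup.c:

    #include <stdbool.h>

    #define UPDATE_CLASSID_BATCH 1000    /* illustrative value */

    struct update_ctx {
        unsigned int classid;
        unsigned int batch;    /* sockets left before yielding */
    };

    /* Returning true asks the socket iterator to stop here so the walk
     * can reschedule and resume later, as update_classid_sock() does. */
    static bool update_one(struct update_ctx *ctx)
    {
        /* ... store ctx->classid into the socket's cgroup data ... */
        if (--ctx->batch == 0) {
            ctx->batch = UPDATE_CLASSID_BATCH;
            return true;    /* yield point */
        }
        return false;
    }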
/linux-6.1.9/drivers/vfio/
vfio_iommu_type1.c
  in vfio_batch_init():
     468  static void vfio_batch_init(struct vfio_batch *batch)    (argument)
     470  batch->size = 0;
     471  batch->offset = 0;
     476  batch->pages = (struct page **) __get_free_page(GFP_KERNEL);
     477  if (!batch->pages)
     480  batch->capacity = VFIO_BATCH_MAX_CAPACITY;
     484  batch->pages = &batch->fallback_page;
     485  batch->capacity = 1;
  in vfio_batch_unpin():
     488  static void vfio_batch_unpin(struct vfio_batch *batch, struct vfio_dma *dma)    (argument)
     490  while (batch->size) {
  [all …]
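vfio_batch_init() sizes the pin batch opportunistically: a page-sized pointer array when the allocation succeeds, otherwise a capacity-1 batch backed by an embedded fallback slot, so pinning still works, just unbatched. A sketch under that assumption:

    #include <stdlib.h>

    /* Kernel sizing is PAGE_SIZE / sizeof(struct page *); 512 assumes a
     * 4 KiB page and 8-byte pointers. */
    #define BATCH_MAX_CAPACITY 512

    struct pin_batch {
        void **pages;         /* the big array, or &fallback on failure */
        void *fallback;       /* single-entry storage of last resort */
        unsigned int capacity;
        unsigned int size;
        unsigned int offset;
    };

    /* Mirrors vfio_batch_init(): try to get a page-sized pointer array,
     * and degrade to an unbatched, capacity-1 batch if that fails. */
    static void batch_init(struct pin_batch *b)
    {
        b->size = 0;
        b->offset = 0;

        b->pages = calloc(BATCH_MAX_CAPACITY, sizeof(*b->pages));
        if (b->pages) {
            b->capacity = BATCH_MAX_CAPACITY;
            return;
        }

        b->pages = &b->fallback;    /* pinning still works, one page at a time */
        b->capacity = 1;
    }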
/linux-6.1.9/include/linux/
ptr_ring.h
      39  int batch; /* number of entries to consume in a batch */    (member)
  in __ptr_ring_discard_one():
     272  if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
  in __ptr_ring_set_size():
     477  r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
     483  if (r->batch > r->size / 2 || !r->batch)
     484  r->batch = 1;
  in ptr_ring_unconsume():
     511  static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,    (argument)
     544  r->queue[head] = batch[--n];
     553  destroy(batch[--n]);
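__ptr_ring_set_size() derives the consume batch from cache geometry, two cache lines' worth of slots, so the consumer invalidates producer-visible entries in bursts instead of one by one; rings too small for that fall back to a batch of 1. Worked example: with 64-byte lines and 8-byte pointers the batch is 2 * 64 / 8 = 16.

    #include <stddef.h>

    #define SMP_CACHE_BYTES 64    /* assumption: 64-byte cache lines */

    /* The batch sizing from __ptr_ring_set_size(), for a ring of void *. */
    static int ring_batch(int size)
    {
        int batch = SMP_CACHE_BYTES * 2 / (int)sizeof(void *);    /* 16 on LP64 */

        /* A batch bigger than half the ring (or zero, for huge elements)
         * would let consumed slots linger, so fall back to per-entry frees. */
        if (batch > size / 2 || !batch)
            batch = 1;
        return batch;
    }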
percpu_counter.h
      47          s32 batch);
      49  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
  in __percpu_counter_compare():
     147  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)    (argument)
  in percpu_counter_add_batch():
     168  percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)    (argument)
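percpu_counter_add_batch() is the batched-add primitive: each CPU accumulates a private delta and folds it into the shared s64 under the lock only once the delta's magnitude reaches batch. A userspace sketch of the add side, with thread-local storage in place of per-CPU counters:

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
    static int64_t shared_count;                  /* fbc->count in the kernel */
    static _Thread_local int32_t local_delta;     /* this CPU's *fbc->counters */

    void counter_add_batch(int64_t amount, int32_t batch)
    {
        int64_t d = local_delta + amount;

        if (d >= batch || d <= -batch) {    /* threshold hit: publish */
            pthread_mutex_lock(&counter_lock);
            shared_count += d;
            pthread_mutex_unlock(&counter_lock);
            local_delta = 0;
        } else {
            local_delta = (int32_t)d;       /* stay on the lock-free fast path */
        }
    }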