/linux-6.6.21/drivers/gpu/drm/i915/ |
D | i915_request.c |
  115  struct i915_request *rq = to_request(fence);  in i915_fence_release()  local
  117  GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&  in i915_fence_release()
  118  rq->guc_prio != GUC_PRIO_FINI);  in i915_fence_release()
  120  i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));  in i915_fence_release()
  121  if (rq->batch_res) {  in i915_fence_release()
  122  i915_vma_resource_put(rq->batch_res);  in i915_fence_release()
  123  rq->batch_res = NULL;  in i915_fence_release()
  133  i915_sw_fence_fini(&rq->submit);  in i915_fence_release()
  134  i915_sw_fence_fini(&rq->semaphore);  in i915_fence_release()
  167  if (is_power_of_2(rq->execution_mask) &&  in i915_fence_release()
  [all …]
|
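The i915_fence_release() hits above free the capture list via fetch_and_zero(&rq->capture_list), which reads the pointer and leaves NULL behind in a single expression. Below is a minimal userspace sketch of that read-and-clear idiom using a GNU statement expression; the macro body is illustrative and may not match the driver's exact helper.

#include <stdio.h>

/*
 * Read-and-clear idiom: hand back the old value and reset the slot.
 * Illustrative only; not the kernel's definition (and not atomic).
 */
#define fetch_and_zero(ptr) ({                    \
	__typeof__(*(ptr)) val__ = *(ptr);        \
	*(ptr) = (__typeof__(*(ptr)))0;           \
	val__;                                    \
})

int main(void)
{
	const char *capture_list = "pending capture";
	const char *old = fetch_and_zero(&capture_list);

	printf("freeing '%s', slot is now %p\n", old, (void *)capture_list);
	return 0;
}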
D | i915_request.h |
  64   #define RQ_TRACE(rq, fmt, ...) do { \  argument
  65   const struct i915_request *rq__ = (rq); \
  378  void __i915_request_skip(struct i915_request *rq);
  379  bool i915_request_set_error_once(struct i915_request *rq, int error);
  380  struct i915_request *i915_request_mark_eio(struct i915_request *rq);
  383  void __i915_request_queue(struct i915_request *rq,
  385  void __i915_request_queue_bh(struct i915_request *rq);
  387  bool i915_request_retire(struct i915_request *rq);
  388  void i915_request_retire_upto(struct i915_request *rq);
  400  i915_request_get(struct i915_request *rq)  in i915_request_get()  argument
  [all …]
|
/linux-6.6.21/kernel/sched/ |
D | pelt.h |
  7    int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
  8    int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
  11   int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
  13   static inline u64 thermal_load_avg(struct rq *rq)  in thermal_load_avg()  argument
  15   return READ_ONCE(rq->avg_thermal.load_avg);  in thermal_load_avg()
  19   update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)  in update_thermal_load_avg()  argument
  24   static inline u64 thermal_load_avg(struct rq *rq)  in thermal_load_avg()  argument
  31   int update_irq_load_avg(struct rq *rq, u64 running);
  34   update_irq_load_avg(struct rq *rq, u64 running)  in update_irq_load_avg()  argument
  64   static inline u64 rq_clock_pelt(struct rq *rq)  in rq_clock_pelt()  argument
  [all …]
|
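Note that thermal_load_avg() appears twice in the pelt.h hits: one version reads the per-runqueue average, the other is presumably the stub used when the feature is configured out, so callers never need their own #ifdef. A small userspace sketch of that config-gated stub pattern; CONFIG_DEMO_FEATURE and the struct layout are invented for the example.

#include <stdio.h>

/* Stand-in runqueue; not the scheduler's struct rq. */
struct rq_demo {
	unsigned long long thermal_load_avg;
};

#ifdef CONFIG_DEMO_FEATURE
static inline unsigned long long thermal_load_avg(struct rq_demo *rq)
{
	return rq->thermal_load_avg;      /* real read in the enabled build */
}
#else
static inline unsigned long long thermal_load_avg(struct rq_demo *rq)
{
	(void)rq;
	return 0;                         /* cheap stub when the feature is off */
}
#endif

int main(void)
{
	struct rq_demo rq = { .thermal_load_avg = 42 };

	printf("%llu\n", thermal_load_avg(&rq));
	return 0;
}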
D | sched.h |
  100  struct rq;
  114  extern void calc_global_load_tick(struct rq *this_rq);
  115  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
  117  extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
  614  struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */  member
  695  struct rq *rq;  member
  896  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
  949  struct rq;
  952  void (*func)(struct rq *rq);
  962  struct rq {  struct
  [all …]
|
D | stats.h |
  13   rq_sched_info_arrive(struct rq *rq, unsigned long long delta)  in rq_sched_info_arrive()  argument
  15   if (rq) {  in rq_sched_info_arrive()
  16   rq->rq_sched_info.run_delay += delta;  in rq_sched_info_arrive()
  17   rq->rq_sched_info.pcount++;  in rq_sched_info_arrive()
  25   rq_sched_info_depart(struct rq *rq, unsigned long long delta)  in rq_sched_info_depart()  argument
  27   if (rq)  in rq_sched_info_depart()
  28   rq->rq_cpu_time += delta;  in rq_sched_info_depart()
  32   rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)  in rq_sched_info_dequeue()  argument
  34   if (rq)  in rq_sched_info_dequeue()
  35   rq->rq_sched_info.run_delay += delta;  in rq_sched_info_dequeue()
  [all …]
|
D | deadline.c |
  62   static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)  in rq_of_dl_rq()
  64   return container_of(dl_rq, struct rq, dl);  in rq_of_dl_rq()
  70   struct rq *rq = task_rq(p);  in dl_rq_of_se()  local
  72   return &rq->dl;  in dl_rq_of_se()
  177  struct rq *rq = cpu_rq(i);  in __dl_update()  local
  179  rq->dl.extra_bw += bw;  in __dl_update()
  313  struct rq *rq;  in dl_change_utilization()  local
  320  rq = task_rq(p);  in dl_change_utilization()
  322  sub_running_bw(&p->dl, &rq->dl);  in dl_change_utilization()
  334  __sub_rq_bw(p->dl.dl_bw, &rq->dl);  in dl_change_utilization()
  [all …]
|
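rq_of_dl_rq() above is the usual container_of() trick: given a pointer to the dl_rq embedded in a struct rq, step back to the enclosing runqueue. A standalone sketch of the same pattern with stand-in types:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as the kernel macro: subtract the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq_demo { int nr_running; };

struct rq_demo {
	int cpu;
	struct dl_rq_demo dl;    /* embedded sub-runqueue */
};

static struct rq_demo *rq_of_dl_rq(struct dl_rq_demo *dl_rq)
{
	return container_of(dl_rq, struct rq_demo, dl);
}

int main(void)
{
	struct rq_demo rq = { .cpu = 3 };
	struct rq_demo *back = rq_of_dl_rq(&rq.dl);

	printf("cpu=%d (same object: %d)\n", back->cpu, back == &rq);
	return 0;
}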
D | stop_task.c |
  19   balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)  in balance_stop()  argument
  21   return sched_stop_runnable(rq);  in balance_stop()
  26   check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)  in check_preempt_curr_stop()  argument
  31   static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)  in set_next_task_stop()  argument
  33   stop->se.exec_start = rq_clock_task(rq);  in set_next_task_stop()
  36   static struct task_struct *pick_task_stop(struct rq *rq)  in pick_task_stop()  argument
  38   if (!sched_stop_runnable(rq))  in pick_task_stop()
  41   return rq->stop;  in pick_task_stop()
  44   static struct task_struct *pick_next_task_stop(struct rq *rq)  in pick_next_task_stop()  argument
  46   struct task_struct *p = pick_task_stop(rq);  in pick_next_task_stop()
  [all …]
|
D | rt.c |
  179  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)  in rq_of_rt_rq()
  181  return rt_rq->rq;  in rq_of_rt_rq()
  189  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)  in rq_of_rt_se()
  193  return rt_rq->rq;  in rq_of_rt_se()
  222  struct rq *rq = cpu_rq(cpu);  in init_tg_rt_entry()  local
  226  rt_rq->rq = rq;  in init_tg_rt_entry()
  236  rt_se->rt_rq = &rq->rt;  in init_tg_rt_entry()
  294  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)  in rq_of_rt_rq()
  296  return container_of(rt_rq, struct rq, rt);  in rq_of_rt_rq()
  299  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)  in rq_of_rt_se()
  [all …]
|
D | core.c |
  118  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
  239  void sched_core_enqueue(struct rq *rq, struct task_struct *p)  in sched_core_enqueue()  argument
  241  rq->core->core_task_seq++;  in sched_core_enqueue()
  246  rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);  in sched_core_enqueue()
  249  void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)  in sched_core_dequeue()  argument
  251  rq->core->core_task_seq++;  in sched_core_dequeue()
  254  rb_erase(&p->core_node, &rq->core_tree);  in sched_core_dequeue()
  263  if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&  in sched_core_dequeue()
  264  rq->core->core_forceidle_count && rq->curr == rq->idle)  in sched_core_dequeue()
  265  resched_curr(rq);  in sched_core_dequeue()
  [all …]
|
/linux-6.6.21/drivers/scsi/fnic/ |
D | vnic_rq.c |
  15   static int vnic_rq_alloc_bufs(struct vnic_rq *rq)  in vnic_rq_alloc_bufs()  argument
  18   unsigned int i, j, count = rq->ring.desc_count;  in vnic_rq_alloc_bufs()
  22   rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);  in vnic_rq_alloc_bufs()
  23   if (!rq->bufs[i]) {  in vnic_rq_alloc_bufs()
  30   buf = rq->bufs[i];  in vnic_rq_alloc_bufs()
  33   buf->desc = (u8 *)rq->ring.descs +  in vnic_rq_alloc_bufs()
  34   rq->ring.desc_size * buf->index;  in vnic_rq_alloc_bufs()
  36   buf->next = rq->bufs[0];  in vnic_rq_alloc_bufs()
  39   buf->next = rq->bufs[i + 1];  in vnic_rq_alloc_bufs()
  47   rq->to_use = rq->to_clean = rq->bufs[0];  in vnic_rq_alloc_bufs()
  [all …]
|
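vnic_rq_alloc_bufs() points each buffer at its slot in the flat descriptor ring and links the last buffer back to the first, so to_use/to_clean can walk the ring without bounds checks. The sketch below reproduces only that ring-of-buffers wiring, with sizes and field names made up for the example:

#include <stdio.h>
#include <stdlib.h>

struct buf_demo {
	unsigned int index;
	void *desc;               /* points into the flat descriptor array */
	struct buf_demo *next;    /* circular link */
};

int main(void)
{
	const unsigned int count = 8, desc_size = 16;
	char *descs = calloc(count, desc_size);
	struct buf_demo *bufs = calloc(count, sizeof(*bufs));

	if (!descs || !bufs)
		return 1;

	for (unsigned int i = 0; i < count; i++) {
		bufs[i].index = i;
		bufs[i].desc = descs + (size_t)desc_size * i;
		bufs[i].next = &bufs[(i + 1) % count];   /* wrap to close the ring */
	}

	/* to_use and to_clean both start at the first buffer */
	struct buf_demo *to_use = &bufs[0];
	printf("first desc at %p, ring closes: %d\n",
	       to_use->desc, bufs[count - 1].next == &bufs[0]);

	free(bufs);
	free(descs);
	return 0;
}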
D | vnic_rq.h |
  93   static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)  in vnic_rq_desc_avail()  argument
  96   return rq->ring.desc_avail;  in vnic_rq_desc_avail()
  99   static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)  in vnic_rq_desc_used()  argument
  102  return rq->ring.desc_count - rq->ring.desc_avail - 1;  in vnic_rq_desc_used()
  105  static inline void *vnic_rq_next_desc(struct vnic_rq *rq)  in vnic_rq_next_desc()  argument
  107  return rq->to_use->desc;  in vnic_rq_next_desc()
  110  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)  in vnic_rq_next_index()  argument
  112  return rq->to_use->index;  in vnic_rq_next_index()
  115  static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)  in vnic_rq_next_buf_index()  argument
  117  return rq->buf_index++;  in vnic_rq_next_buf_index()
  [all …]
|
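The vnic_rq_desc_used() arithmetic (desc_count - desc_avail - 1) suggests one descriptor is always held back, a common way to tell a full ring apart from an empty one; that reading is an inference, not something the header states. A tiny sketch of the same accounting:

#include <stdio.h>

/* Stand-in for the driver's ring bookkeeping fields. */
struct ring_demo {
	unsigned int desc_count;
	unsigned int desc_avail;
};

static unsigned int rq_desc_avail(const struct ring_demo *r)
{
	return r->desc_avail;
}

static unsigned int rq_desc_used(const struct ring_demo *r)
{
	return r->desc_count - r->desc_avail - 1;   /* one slot kept in reserve */
}

int main(void)
{
	/* freshly initialised ring: nothing used, one slot held back */
	struct ring_demo ring = { .desc_count = 64, .desc_avail = 63 };

	printf("avail=%u used=%u\n", rq_desc_avail(&ring), rq_desc_used(&ring));
	return 0;
}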
/linux-6.6.21/drivers/net/ethernet/cisco/enic/ |
D | vnic_rq.c |
  18   static int vnic_rq_alloc_bufs(struct vnic_rq *rq)  in vnic_rq_alloc_bufs()  argument
  21   unsigned int i, j, count = rq->ring.desc_count;  in vnic_rq_alloc_bufs()
  25   rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_KERNEL);  in vnic_rq_alloc_bufs()
  26   if (!rq->bufs[i])  in vnic_rq_alloc_bufs()
  31   buf = rq->bufs[i];  in vnic_rq_alloc_bufs()
  34   buf->desc = (u8 *)rq->ring.descs +  in vnic_rq_alloc_bufs()
  35   rq->ring.desc_size * buf->index;  in vnic_rq_alloc_bufs()
  37   buf->next = rq->bufs[0];  in vnic_rq_alloc_bufs()
  40   buf->next = rq->bufs[i + 1];  in vnic_rq_alloc_bufs()
  48   rq->to_use = rq->to_clean = rq->bufs[0];  in vnic_rq_alloc_bufs()
  [all …]
|
D | vnic_rq.h |
  84   static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)  in vnic_rq_desc_avail()  argument
  87   return rq->ring.desc_avail;  in vnic_rq_desc_avail()
  90   static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)  in vnic_rq_desc_used()  argument
  93   return rq->ring.desc_count - rq->ring.desc_avail - 1;  in vnic_rq_desc_used()
  96   static inline void *vnic_rq_next_desc(struct vnic_rq *rq)  in vnic_rq_next_desc()  argument
  98   return rq->to_use->desc;  in vnic_rq_next_desc()
  101  static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)  in vnic_rq_next_index()  argument
  103  return rq->to_use->index;  in vnic_rq_next_index()
  106  static inline void vnic_rq_post(struct vnic_rq *rq,  in vnic_rq_post()  argument
  111  struct vnic_rq_buf *buf = rq->to_use;  in vnic_rq_post()
  [all …]
|
/linux-6.6.21/drivers/gpu/drm/i915/gt/ |
D | gen8_engine_cs.c |
  13   int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)  in gen8_emit_flush_rcs()  argument
  42   if (GRAPHICS_VER(rq->i915) == 9)  in gen8_emit_flush_rcs()
  46   if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))  in gen8_emit_flush_rcs()
  58   cs = intel_ring_begin(rq, len);  in gen8_emit_flush_rcs()
  74   intel_ring_advance(rq, cs);  in gen8_emit_flush_rcs()
  79   int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)  in gen8_emit_flush_xcs()  argument
  83   cs = intel_ring_begin(rq, 4);  in gen8_emit_flush_xcs()
  99   if (rq->engine->class == VIDEO_DECODE_CLASS)  in gen8_emit_flush_xcs()
  107  intel_ring_advance(rq, cs);  in gen8_emit_flush_xcs()
  112  int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)  in gen11_emit_flush_rcs()  argument
  [all …]
|
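The emitters above follow a begin/fill/advance rhythm: intel_ring_begin() reserves space and returns a dword cursor, the caller writes commands through it, and intel_ring_advance() commits them. The toy ring below is invented for the sketch; only the calling pattern mirrors gen8_emit_flush_rcs():

#include <stdint.h>
#include <stdio.h>

struct ring_demo {
	uint32_t dwords[64];
	unsigned int tail;
};

static uint32_t *ring_begin(struct ring_demo *ring, unsigned int len)
{
	if (ring->tail + len > 64)
		return NULL;               /* no space: real code would wait or wrap */
	return &ring->dwords[ring->tail];
}

static void ring_advance(struct ring_demo *ring, uint32_t *cs)
{
	ring->tail = (unsigned int)(cs - ring->dwords);   /* commit what was written */
}

int main(void)
{
	struct ring_demo ring = { .tail = 0 };
	uint32_t *cs = ring_begin(&ring, 4);

	if (!cs)
		return 1;
	*cs++ = 0x7a000003;                /* pretend flush opcode, not a real one */
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	ring_advance(&ring, cs);

	printf("tail=%u\n", ring.tail);
	return 0;
}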
D | gen6_engine_cs.c |
  55   gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)  in gen6_emit_post_sync_nonzero_flush()  argument
  58   intel_gt_scratch_offset(rq->engine->gt,  in gen6_emit_post_sync_nonzero_flush()
  62   cs = intel_ring_begin(rq, 6);  in gen6_emit_post_sync_nonzero_flush()
  72   intel_ring_advance(rq, cs);  in gen6_emit_post_sync_nonzero_flush()
  74   cs = intel_ring_begin(rq, 6);  in gen6_emit_post_sync_nonzero_flush()
  84   intel_ring_advance(rq, cs);  in gen6_emit_post_sync_nonzero_flush()
  89   int gen6_emit_flush_rcs(struct i915_request *rq, u32 mode)  in gen6_emit_flush_rcs()  argument
  92   intel_gt_scratch_offset(rq->engine->gt,  in gen6_emit_flush_rcs()
  98   ret = gen6_emit_post_sync_nonzero_flush(rq);  in gen6_emit_flush_rcs()
  130  cs = intel_ring_begin(rq, 4);  in gen6_emit_flush_rcs()
  [all …]
|
D | intel_breadcrumbs.c |
  106  check_signal_order(struct intel_context *ce, struct i915_request *rq)  in check_signal_order()  argument
  108  if (rq->context != ce)  in check_signal_order()
  111  if (!list_is_last(&rq->signal_link, &ce->signals) &&  in check_signal_order()
  112  i915_seqno_passed(rq->fence.seqno,  in check_signal_order()
  113  list_next_entry(rq, signal_link)->fence.seqno))  in check_signal_order()
  116  if (!list_is_first(&rq->signal_link, &ce->signals) &&  in check_signal_order()
  117  i915_seqno_passed(list_prev_entry(rq, signal_link)->fence.seqno,  in check_signal_order()
  118  rq->fence.seqno))  in check_signal_order()
  207  struct i915_request *rq;  in signal_irq_work()  local
  209  list_for_each_entry_rcu(rq, &ce->signals, signal_link) {  in signal_irq_work()
  [all …]
|
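check_signal_order() verifies that requests on ce->signals stay in ascending seqno order via i915_seqno_passed(). The comparison below is the classic wraparound-safe idiom for 32-bit sequence numbers; treat it as an illustration of the technique rather than the driver's exact helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if seq1 is at or after seq2, even across a 32-bit wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	/* still ordered correctly across the wrap point */
	printf("%d\n", seqno_passed(5, 0xfffffff0u));   /* 1: 5 is "after" the wrap */
	printf("%d\n", seqno_passed(0xfffffff0u, 5));   /* 0 */
	return 0;
}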
D | selftest_execlists.c |
  28   static bool is_active(struct i915_request *rq)  in is_active()  argument
  30   if (i915_request_is_active(rq))  in is_active()
  33   if (i915_request_on_hold(rq))  in is_active()
  36   if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))  in is_active()
  43   struct i915_request *rq,  in wait_for_submit()  argument
  53   if (i915_request_completed(rq)) /* that was quick! */  in wait_for_submit()
  58   if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))  in wait_for_submit()
  69   struct i915_request *rq,  in wait_for_reset()  argument
  81   if (i915_request_completed(rq))  in wait_for_reset()
  84   if (READ_ONCE(rq->fence.error))  in wait_for_reset()
  [all …]
|
/linux-6.6.21/include/linux/ |
D | blk-mq.h |
  198  static inline bool blk_rq_is_passthrough(struct request *rq)  in blk_rq_is_passthrough()  argument
  200  return blk_op_is_passthrough(rq->cmd_flags);  in blk_rq_is_passthrough()
  208  #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)  argument
  210  #define rq_dma_dir(rq) \  argument
  211  (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
  213  #define rq_list_add(listptr, rq) do { \  argument
  214  (rq)->rq_next = *(listptr); \
  215  *(listptr) = rq; \
  218  #define rq_list_add_tail(lastpptr, rq) do { \  argument
  219  (rq)->rq_next = NULL; \
  [all …]
|
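rq_list_add() above chains requests through the embedded rq_next pointer, so pushing to the front of the list is just two stores and needs no separate node allocation. A self-contained sketch of the same intrusive-list macro, with struct req_demo standing in for struct request:

#include <stdio.h>

struct req_demo {
	int tag;
	struct req_demo *rq_next;   /* intrusive link, like request->rq_next */
};

#define rq_list_add(listptr, rq) do {  \
	(rq)->rq_next = *(listptr);        \
	*(listptr) = (rq);                 \
} while (0)

int main(void)
{
	struct req_demo a = { .tag = 1 }, b = { .tag = 2 };
	struct req_demo *list = NULL;

	rq_list_add(&list, &a);
	rq_list_add(&list, &b);            /* b becomes the new head */

	for (struct req_demo *r = list; r; r = r->rq_next)
		printf("tag %d\n", r->tag);
	return 0;
}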
/linux-6.6.21/fs/erofs/ |
D | decompressor.c |
  20   struct z_erofs_decompress_req *rq;  member
  68   struct z_erofs_decompress_req *rq = ctx->rq;  in z_erofs_lz4_prepare_dstpages()  local
  73   EROFS_SB(rq->sb)->lz4.max_distance_pages;  in z_erofs_lz4_prepare_dstpages()
  79   struct page *const page = rq->out[i];  in z_erofs_lz4_prepare_dstpages()
  86   if (!rq->fillgaps && test_bit(j, bounced)) {  in z_erofs_lz4_prepare_dstpages()
  89   availables[top++] = rq->out[i - lz4_max_distance_pages];  in z_erofs_lz4_prepare_dstpages()
  119  rq->out[i] = victim;  in z_erofs_lz4_prepare_dstpages()
  128  struct z_erofs_decompress_req *rq = ctx->rq;  in z_erofs_lz4_handle_overlap()  local
  133  if (rq->inplace_io) {  in z_erofs_lz4_handle_overlap()
  135  if (rq->partial_decoding || !may_inplace ||  in z_erofs_lz4_handle_overlap()
  [all …]
|
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/ |
D | rx.c |
  19   int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)  in mlx5e_xsk_alloc_rx_mpwqe()  argument
  21   struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);  in mlx5e_xsk_alloc_rx_mpwqe()
  22   struct mlx5e_icosq *icosq = rq->icosq;  in mlx5e_xsk_alloc_rx_mpwqe()
  30   if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  in mlx5e_xsk_alloc_rx_mpwqe()
  35   batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,  in mlx5e_xsk_alloc_rx_mpwqe()
  36   rq->mpwqe.pages_per_wqe);  in mlx5e_xsk_alloc_rx_mpwqe()
  44   for (; batch < rq->mpwqe.pages_per_wqe; batch++) {  in mlx5e_xsk_alloc_rx_mpwqe()
  45   xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_mpwqe()
  50   pi = mlx5e_icosq_get_next_pi(icosq, rq->mpwqe.umr_wqebbs);  in mlx5e_xsk_alloc_rx_mpwqe()
  52   memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));  in mlx5e_xsk_alloc_rx_mpwqe()
  [all …]
|
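mlx5e_xsk_alloc_rx_mpwqe() first asks the XSK pool for a whole batch of buffers and then tops up one at a time if the batch came back short. The pool functions in the sketch below are invented stand-ins; only the batch-then-singles control flow mirrors the driver:

#include <stdio.h>

static int fake_pool_alloc_batch(int *out, int want)
{
	int got = want > 3 ? 3 : want;    /* pretend the fast path ran dry */

	for (int i = 0; i < got; i++)
		out[i] = 100 + i;
	return got;
}

static int fake_pool_alloc_one(void)
{
	return 200;                        /* slow-path single allocation */
}

int main(void)
{
	enum { PAGES_PER_WQE = 8 };
	int buffs[PAGES_PER_WQE];
	int batch = fake_pool_alloc_batch(buffs, PAGES_PER_WQE);

	/* fill whatever the batch call left empty, one buffer at a time */
	for (; batch < PAGES_PER_WQE; batch++)
		buffs[batch] = fake_pool_alloc_one();

	printf("filled %d buffers\n", batch);
	return 0;
}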
/linux-6.6.21/drivers/scsi/esas2r/ |
D | esas2r_disc.c |
  49   struct esas2r_request *rq);
  51   struct esas2r_request *rq);
  55   struct esas2r_request *rq);
  59   struct esas2r_request *rq);
  61   struct esas2r_request *rq);
  63   struct esas2r_request *rq);
  65   struct esas2r_request *rq);
  67   struct esas2r_request *rq);
  69   struct esas2r_request *rq);
  71   struct esas2r_request *rq);
  [all …]
|
D | esas2r_vda.c |
  59   static void clear_vda_request(struct esas2r_request *rq);
  62   struct esas2r_request *rq);
  67   struct esas2r_request *rq,  in esas2r_process_vda_ioctl()  argument
  93   clear_vda_request(rq);  in esas2r_process_vda_ioctl()
  95   rq->vrq->scsi.function = vi->function;  in esas2r_process_vda_ioctl()
  96   rq->interrupt_cb = esas2r_complete_vda_ioctl;  in esas2r_process_vda_ioctl()
  97   rq->interrupt_cx = vi;  in esas2r_process_vda_ioctl()
  112  rq->vrq->flash.length = cpu_to_le32(datalen);  in esas2r_process_vda_ioctl()
  113  rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;  in esas2r_process_vda_ioctl()
  115  memcpy(rq->vrq->flash.data.file.file_name,  in esas2r_process_vda_ioctl()
  [all …]
|
/linux-6.6.21/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
  64   mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  68   mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
  71   static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  72   static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  73   static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
  89   static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,  in mlx5e_read_enhanced_title_slot()  argument
  92   struct mlx5e_cq_decomp *cqd = &rq->cqd;  in mlx5e_read_enhanced_title_slot()
  97   if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))  in mlx5e_read_enhanced_title_slot()
  100  if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)  in mlx5e_read_enhanced_title_slot()
  105  mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);  in mlx5e_read_enhanced_title_slot()
  [all …]
|
/linux-6.6.21/block/ |
D | blk-flush.c |
  103  static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)  in blk_flush_policy()  argument
  107  if (blk_rq_sectors(rq))  in blk_flush_policy()
  111  if (rq->cmd_flags & REQ_PREFLUSH)  in blk_flush_policy()
  114  (rq->cmd_flags & REQ_FUA))  in blk_flush_policy()
  120  static unsigned int blk_flush_cur_seq(struct request *rq)  in blk_flush_cur_seq()  argument
  122  return 1 << ffz(rq->flush.seq);  in blk_flush_cur_seq()
  125  static void blk_flush_restore_request(struct request *rq)  in blk_flush_restore_request()  argument
  132  rq->bio = rq->biotail;  in blk_flush_restore_request()
  135  rq->rq_flags &= ~RQF_FLUSH_SEQ;  in blk_flush_restore_request()
  136  rq->end_io = rq->flush.saved_end_io;  in blk_flush_restore_request()
  [all …]
|
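blk_flush_policy() decides which phases a request needs: a preflush, a data phase, and a postflush when the request asks for FUA but the device cannot provide it natively. The sketch below keeps only that core decision; the real helper also consults the queue's cache-capability flags, so the flag names and gating here are simplifications:

#include <stdbool.h>
#include <stdio.h>

enum { FSEQ_PREFLUSH = 1, FSEQ_DATA = 2, FSEQ_POSTFLUSH = 4 };

static unsigned int flush_policy(bool has_data, bool wants_preflush,
				 bool wants_fua, bool device_has_fua)
{
	unsigned int policy = 0;

	if (has_data)
		policy |= FSEQ_DATA;
	if (wants_preflush)
		policy |= FSEQ_PREFLUSH;
	if (wants_fua && !device_has_fua)
		policy |= FSEQ_POSTFLUSH;   /* emulate FUA with a trailing flush */
	return policy;
}

int main(void)
{
	/* FUA write on a device without native FUA: data phase + postflush */
	printf("policy=%#x\n", flush_policy(true, false, true, false));
	return 0;
}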
D | blk-mq.c |
  48   static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
  49   static void blk_mq_request_bypass_insert(struct request *rq,
  92   static bool blk_mq_check_inflight(struct request *rq, void *priv)  in blk_mq_check_inflight()  argument
  96   if (rq->part && blk_do_io_stat(rq) &&  in blk_mq_check_inflight()
  97   (!mi->part->bd_partno || rq->part == mi->part) &&  in blk_mq_check_inflight()
  98   blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)  in blk_mq_check_inflight()
  99   mi->inflight[rq_data_dir(rq)]++;  in blk_mq_check_inflight()
  315  void blk_rq_init(struct request_queue *q, struct request *rq)  in blk_rq_init()  argument
  317  memset(rq, 0, sizeof(*rq));  in blk_rq_init()
  319  INIT_LIST_HEAD(&rq->queuelist);  in blk_rq_init()
  [all …]
|