Lines matching refs: e (cross-reference hits for the local variable `e`, a struct amdgpu_mux_entry *, across the amdgpu ring-mux functions listed below; the leading number on each line is the source line in the searched file)

78 	struct amdgpu_mux_entry *e = NULL;  in amdgpu_mux_resubmit_chunks()  local
89 e = &mux->ring_entry[i]; in amdgpu_mux_resubmit_chunks()
94 if (!e) { in amdgpu_mux_resubmit_chunks()
99 last_seq = atomic_read(&e->ring->fence_drv.last_seq); in amdgpu_mux_resubmit_chunks()
103 list_for_each_entry(chunk, &e->list, entry) { in amdgpu_mux_resubmit_chunks()
105 amdgpu_fence_update_start_timestamp(e->ring, in amdgpu_mux_resubmit_chunks()
109 le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) { in amdgpu_mux_resubmit_chunks()
110 if (chunk->cntl_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
111 amdgpu_ring_patch_cntl(e->ring, in amdgpu_mux_resubmit_chunks()
113 if (chunk->ce_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
114 amdgpu_ring_patch_ce(e->ring, chunk->ce_offset); in amdgpu_mux_resubmit_chunks()
115 if (chunk->de_offset <= e->ring->buf_mask) in amdgpu_mux_resubmit_chunks()
116 amdgpu_ring_patch_de(e->ring, chunk->de_offset); in amdgpu_mux_resubmit_chunks()
118 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring, in amdgpu_mux_resubmit_chunks()
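Taken together, the matches above outline amdgpu_mux_resubmit_chunks(): pick the entry for the preempted software ring, then replay its pending chunks into the real ring, patching any recorded control/CE/DE packets first. A minimal sketch of that flow follows; only the matched lines are verbatim, and the selection loop, the chunk fields `sync_seq`, `start`, and `end`, and the surrounding control flow are assumptions:

    struct amdgpu_mux_entry *e = NULL;
    struct amdgpu_mux_chunk *chunk;
    u32 last_seq;
    int i;

    /* Assumed selection: find the low-priority (preemptible) entry. */
    for (i = 0; i < mux->num_ring_entries; i++) {
            if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                    e = &mux->ring_entry[i];
                    break;
            }
    }
    if (!e)
            return;

    last_seq = atomic_read(&e->ring->fence_drv.last_seq);
    list_for_each_entry(chunk, &e->list, entry) {
            if (chunk->sync_seq <= last_seq)        /* already signaled: skip */
                    continue;
            amdgpu_fence_update_start_timestamp(e->ring, chunk->sync_seq,
                                                ktime_get());   /* args assumed */
            /* The matches also show a guard comparing chunk->sync_seq against
             * the fence value at fence_drv.cpu_addr + 2 (line 109) before
             * patching; elided here. Offsets past buf_mask mean "not
             * recorded", so those packets are left untouched. */
            if (chunk->cntl_offset <= e->ring->buf_mask)
                    amdgpu_ring_patch_cntl(e->ring, chunk->cntl_offset);
            if (chunk->ce_offset <= e->ring->buf_mask)
                    amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
            if (chunk->de_offset <= e->ring->buf_mask)
                    amdgpu_ring_patch_de(e->ring, chunk->de_offset);
            amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
                                                  chunk->start, chunk->end);
    }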
178 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_fini() local
183 e = &mux->ring_entry[i]; in amdgpu_ring_mux_fini()
184 list_for_each_entry_safe(chunk, chunk2, &e->list, entry) { in amdgpu_ring_mux_fini()
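The fini matches show the standard teardown idiom: list_for_each_entry_safe() is the _safe variant precisely because each chunk is unlinked and freed during the walk. A sketch, with the list_del()/kfree() body assumed:

    for (i = 0; i < mux->num_ring_entries; i++) {
            e = &mux->ring_entry[i];
            list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
                    list_del(&chunk->entry);
                    kfree(chunk);   /* assumes a plain kmalloc allocation */
            }
    }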
198 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_add_sw_ring() local
205 e = &mux->ring_entry[mux->num_ring_entries]; in amdgpu_ring_mux_add_sw_ring()
207 e->ring = ring; in amdgpu_ring_mux_add_sw_ring()
209 INIT_LIST_HEAD(&e->list); in amdgpu_ring_mux_add_sw_ring()
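Registration claims the next free slot in mux->ring_entry[], binds it to the software ring, and gives it an empty chunk list. A sketch; the capacity field `ring_entry_size`, the bounds check, and the counter increment are assumptions:

    if (mux->num_ring_entries >= mux->ring_entry_size)  /* assumed bound */
            return -ENOENT;

    e = &mux->ring_entry[mux->num_ring_entries];
    e->ring = ring;
    INIT_LIST_HEAD(&e->list);
    mux->num_ring_entries++;
    return 0;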
216 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_set_wptr() local
223 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_set_wptr()
224 if (!e) { in amdgpu_ring_mux_set_wptr()
236 e->sw_cptr = e->sw_wptr; in amdgpu_ring_mux_set_wptr()
238 if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit) in amdgpu_ring_mux_set_wptr()
239 e->sw_cptr = mux->wptr_resubmit; in amdgpu_ring_mux_set_wptr()
240 e->sw_wptr = wptr; in amdgpu_ring_mux_set_wptr()
241 e->start_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
245 amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr); in amdgpu_ring_mux_set_wptr()
246 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
249 e->end_ptr_in_hw_ring = mux->real_ring->wptr; in amdgpu_ring_mux_set_wptr()
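set_wptr is the copy path: advance the software copy pointer (sw_cptr) to what was already copied, skip anything a resubmit replayed in the meantime, then copy [sw_cptr, wptr) into the real ring while recording where that span starts and ends in hardware-ring terms. A sketch built almost entirely from the matched lines; the early return and the alternate branch at line 249 (which only records end_ptr_in_hw_ring without copying) are assumptions:

    e = amdgpu_ring_mux_sw_entry(mux, ring);
    if (!e)
            return;

    e->sw_cptr = e->sw_wptr;
    /* A resubmit may already have replayed part of a low-priority ring;
     * never copy the same packets twice. */
    if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
            e->sw_cptr = mux->wptr_resubmit;
    e->sw_wptr = wptr;
    e->start_ptr_in_hw_ring = mux->real_ring->wptr;

    amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
    e->end_ptr_in_hw_ring = mux->real_ring->wptr;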
256 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_get_wptr() local
258 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_wptr()
259 if (!e) { in amdgpu_ring_mux_get_wptr()
264 return e->sw_wptr; in amdgpu_ring_mux_get_wptr()
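Every accessor above starts with amdgpu_ring_mux_sw_entry() followed by a NULL check. The helper itself is not among the matches; a hypothetical linear-scan version is shown below (the real driver may instead index directly via a cached per-ring slot):

    static struct amdgpu_mux_entry *
    amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
                             struct amdgpu_ring *ring)
    {
            int i;

            for (i = 0; i < mux->num_ring_entries; i++)
                    if (mux->ring_entry[i].ring == ring)
                            return &mux->ring_entry[i];
            return NULL;    /* callers log an error and bail out */
    }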
285 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_get_rptr() local
288 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_get_rptr()
289 if (!e) { in amdgpu_ring_mux_get_rptr()
296 start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
297 end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask; in amdgpu_ring_mux_get_rptr()
306 e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask; in amdgpu_ring_mux_get_rptr()
308 e->sw_rptr = e->sw_cptr; in amdgpu_ring_mux_get_rptr()
311 e->sw_rptr = e->sw_wptr; in amdgpu_ring_mux_get_rptr()
314 return e->sw_rptr; in amdgpu_ring_mux_get_rptr()
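get_rptr translates the real ring's read pointer back into software-ring coordinates: start/end bracket this entry's last copied span in the hardware ring, and the hardware rptr's position relative to that span decides whether the software rptr sits mid-span (sw_cptr + offset), before it (sw_cptr), or past it (sw_wptr). A sketch; reading the hardware rptr and the wrap handling are assumptions, including that ring_size is in bytes while pointers count dwords:

    u64 start, end, r, offset;

    start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
    end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
    r = amdgpu_ring_get_rptr(mux->real_ring) & mux->real_ring->buf_mask;

    if (start > end) {              /* span wraps around the ring */
            if (r <= end)
                    r += mux->real_ring->ring_size >> 2;    /* bytes -> dwords */
            end += mux->real_ring->ring_size >> 2;
    }

    if (start <= r && r <= end) {   /* hw is inside our span */
            offset = r - start;
            e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
    } else if (r < start) {         /* hw has not reached our span yet */
            e->sw_rptr = e->sw_cptr;
    } else {                        /* hw is already past our span */
            e->sw_rptr = e->sw_wptr;
    }
    return e->sw_rptr;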
436 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_start_ib() local
443 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_start_ib()
444 if (!e) { in amdgpu_ring_mux_start_ib()
460 list_add_tail(&chunk->entry, &e->list); in amdgpu_ring_mux_start_ib()
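start_ib opens a new chunk for the incoming IB and queues it on the entry's list. A sketch; the allocation, the `start` marker, the `sync_seq` bookkeeping, and the "unset" offset defaults (one past buf_mask, so the `<= buf_mask` guards in the resubmit path skip them) are all assumptions:

    chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
    if (!chunk)
            return;

    chunk->start = ring->wptr;          /* where this IB begins (assumed) */
    /* Mark packet offsets "not recorded" so resubmit skips patching them. */
    chunk->cntl_offset = ring->buf_mask + 1;
    chunk->ce_offset = ring->buf_mask + 1;
    chunk->de_offset = ring->buf_mask + 1;
    list_add_tail(&chunk->entry, &e->list);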
466 struct amdgpu_mux_entry *e; in scan_and_remove_signaled_chunk() local
469 e = amdgpu_ring_mux_sw_entry(mux, ring); in scan_and_remove_signaled_chunk()
470 if (!e) { in scan_and_remove_signaled_chunk()
477 list_for_each_entry_safe(chunk, tmp, &e->list, entry) { in scan_and_remove_signaled_chunk()
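scan_and_remove_signaled_chunk() is the matching cleanup for start_ib: once a chunk's fence has signaled it can never need resubmission, so it is unlinked and freed. Again the _safe iterator, because of in-walk deletion. A sketch; the comparison via a hypothetical `sync_seq` field and the freeing are assumptions:

    last_seq = atomic_read(&ring->fence_drv.last_seq);
    list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
            if (chunk->sync_seq <= last_seq) {  /* fence already signaled */
                    list_del(&chunk->entry);
                    kfree(chunk);
            }
    }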
489 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_ib_mark_offset() local
492 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_ib_mark_offset()
493 if (!e) { in amdgpu_ring_mux_ib_mark_offset()
498 chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); in amdgpu_ring_mux_ib_mark_offset()
522 struct amdgpu_mux_entry *e; in amdgpu_ring_mux_end_ib() local
525 e = amdgpu_ring_mux_sw_entry(mux, ring); in amdgpu_ring_mux_end_ib()
526 if (!e) { in amdgpu_ring_mux_end_ib()
531 chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry); in amdgpu_ring_mux_end_ib()
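Both ib_mark_offset and end_ib use list_last_entry() to reach the chunk most recently opened by start_ib: mark_offset records a packet offset in it, end_ib closes it. A sketch of the shared pattern; the offset dispatch and the `end` field are assumptions. Note that list_last_entry() never returns NULL, so an empty list must be guarded separately, e.g. with list_empty():

    if (list_empty(&e->list))
            return;
    chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);

    /* ib_mark_offset: remember where a patchable packet landed. */
    chunk->cntl_offset = offset;    /* or ce_offset / de_offset, by type */

    /* end_ib: close the chunk at the current write pointer. */
    chunk->end = ring->wptr;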
545 struct amdgpu_mux_entry *e; in amdgpu_mcbp_handle_trailing_fence_irq() local
556 e = &mux->ring_entry[i]; in amdgpu_mcbp_handle_trailing_fence_irq()
557 if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) { in amdgpu_mcbp_handle_trailing_fence_irq()
558 ring = e->ring; in amdgpu_mcbp_handle_trailing_fence_irq()
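Finally, the trailing-fence IRQ handler scans all entries for the low-priority (preemptible) ring, since that is the ring whose trailing fence just signaled. A sketch; everything outside the matched scan-and-pick lines is an assumption:

    struct amdgpu_ring *ring = NULL;
    int i;

    for (i = 0; i < mux->num_ring_entries; i++) {
            e = &mux->ring_entry[i];
            if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
                    ring = e->ring;
                    break;
            }
    }
    if (!ring)
            return;     /* no preemptible software ring registered */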