Lines matching refs: ce (struct intel_context *ce, appearing as a function argument or local in each hit)
180 static inline void init_sched_state(struct intel_context *ce) in init_sched_state() argument
182 lockdep_assert_held(&ce->guc_state.lock); in init_sched_state()
183 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK; in init_sched_state()
197 static bool sched_state_is_init(struct intel_context *ce) in sched_state_is_init() argument
199 return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT); in sched_state_is_init()
203 context_wait_for_deregister_to_register(struct intel_context *ce) in context_wait_for_deregister_to_register() argument
205 return ce->guc_state.sched_state & in context_wait_for_deregister_to_register()
210 set_context_wait_for_deregister_to_register(struct intel_context *ce) in set_context_wait_for_deregister_to_register() argument
212 lockdep_assert_held(&ce->guc_state.lock); in set_context_wait_for_deregister_to_register()
213 ce->guc_state.sched_state |= in set_context_wait_for_deregister_to_register()
218 clr_context_wait_for_deregister_to_register(struct intel_context *ce) in clr_context_wait_for_deregister_to_register() argument
220 lockdep_assert_held(&ce->guc_state.lock); in clr_context_wait_for_deregister_to_register()
221 ce->guc_state.sched_state &= in clr_context_wait_for_deregister_to_register()
226 context_destroyed(struct intel_context *ce) in context_destroyed() argument
228 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED; in context_destroyed()
232 set_context_destroyed(struct intel_context *ce) in set_context_destroyed() argument
234 lockdep_assert_held(&ce->guc_state.lock); in set_context_destroyed()
235 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED; in set_context_destroyed()
238 static inline bool context_pending_disable(struct intel_context *ce) in context_pending_disable() argument
240 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE; in context_pending_disable()
243 static inline void set_context_pending_disable(struct intel_context *ce) in set_context_pending_disable() argument
245 lockdep_assert_held(&ce->guc_state.lock); in set_context_pending_disable()
246 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE; in set_context_pending_disable()
249 static inline void clr_context_pending_disable(struct intel_context *ce) in clr_context_pending_disable() argument
251 lockdep_assert_held(&ce->guc_state.lock); in clr_context_pending_disable()
252 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE; in clr_context_pending_disable()
255 static inline bool context_banned(struct intel_context *ce) in context_banned() argument
257 return ce->guc_state.sched_state & SCHED_STATE_BANNED; in context_banned()
260 static inline void set_context_banned(struct intel_context *ce) in set_context_banned() argument
262 lockdep_assert_held(&ce->guc_state.lock); in set_context_banned()
263 ce->guc_state.sched_state |= SCHED_STATE_BANNED; in set_context_banned()
266 static inline void clr_context_banned(struct intel_context *ce) in clr_context_banned() argument
268 lockdep_assert_held(&ce->guc_state.lock); in clr_context_banned()
269 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED; in clr_context_banned()
272 static inline bool context_enabled(struct intel_context *ce) in context_enabled() argument
274 return ce->guc_state.sched_state & SCHED_STATE_ENABLED; in context_enabled()
277 static inline void set_context_enabled(struct intel_context *ce) in set_context_enabled() argument
279 lockdep_assert_held(&ce->guc_state.lock); in set_context_enabled()
280 ce->guc_state.sched_state |= SCHED_STATE_ENABLED; in set_context_enabled()
283 static inline void clr_context_enabled(struct intel_context *ce) in clr_context_enabled() argument
285 lockdep_assert_held(&ce->guc_state.lock); in clr_context_enabled()
286 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED; in clr_context_enabled()
289 static inline bool context_pending_enable(struct intel_context *ce) in context_pending_enable() argument
291 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE; in context_pending_enable()
294 static inline void set_context_pending_enable(struct intel_context *ce) in set_context_pending_enable() argument
296 lockdep_assert_held(&ce->guc_state.lock); in set_context_pending_enable()
297 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE; in set_context_pending_enable()
300 static inline void clr_context_pending_enable(struct intel_context *ce) in clr_context_pending_enable() argument
302 lockdep_assert_held(&ce->guc_state.lock); in clr_context_pending_enable()
303 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE; in clr_context_pending_enable()
306 static inline bool context_registered(struct intel_context *ce) in context_registered() argument
308 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED; in context_registered()
311 static inline void set_context_registered(struct intel_context *ce) in set_context_registered() argument
313 lockdep_assert_held(&ce->guc_state.lock); in set_context_registered()
314 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED; in set_context_registered()
317 static inline void clr_context_registered(struct intel_context *ce) in clr_context_registered() argument
319 lockdep_assert_held(&ce->guc_state.lock); in clr_context_registered()
320 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED; in clr_context_registered()
323 static inline bool context_policy_required(struct intel_context *ce) in context_policy_required() argument
325 return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED; in context_policy_required()
328 static inline void set_context_policy_required(struct intel_context *ce) in set_context_policy_required() argument
330 lockdep_assert_held(&ce->guc_state.lock); in set_context_policy_required()
331 ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED; in set_context_policy_required()
334 static inline void clr_context_policy_required(struct intel_context *ce) in clr_context_policy_required() argument
336 lockdep_assert_held(&ce->guc_state.lock); in clr_context_policy_required()
337 ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED; in clr_context_policy_required()
340 static inline bool context_close_done(struct intel_context *ce) in context_close_done() argument
342 return ce->guc_state.sched_state & SCHED_STATE_CLOSED; in context_close_done()
345 static inline void set_context_close_done(struct intel_context *ce) in set_context_close_done() argument
347 lockdep_assert_held(&ce->guc_state.lock); in set_context_close_done()
348 ce->guc_state.sched_state |= SCHED_STATE_CLOSED; in set_context_close_done()
351 static inline u32 context_blocked(struct intel_context *ce) in context_blocked() argument
353 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >> in context_blocked()
357 static inline void incr_context_blocked(struct intel_context *ce) in incr_context_blocked() argument
359 lockdep_assert_held(&ce->guc_state.lock); in incr_context_blocked()
361 ce->guc_state.sched_state += SCHED_STATE_BLOCKED; in incr_context_blocked()
363 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */ in incr_context_blocked()
366 static inline void decr_context_blocked(struct intel_context *ce) in decr_context_blocked() argument
368 lockdep_assert_held(&ce->guc_state.lock); in decr_context_blocked()
370 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */ in decr_context_blocked()
372 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED; in decr_context_blocked()
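The helpers above all operate on a single ce->guc_state.sched_state word: the named SCHED_STATE_* values are individual flags, and the blocked count is a small counter packed into the same word, which is why init_sched_state() keeps only SCHED_STATE_BLOCKED_MASK. A minimal sketch of how a caller combines them, not taken from this file (example_mark_banned is a hypothetical name; real call sites such as guc_context_revoke() further down follow the same lock-then-flip pattern):

	/* Every setter/clearer asserts ce->guc_state.lock is held, so take
	 * the lock once and update several bits together. */
	static void example_mark_banned(struct intel_context *ce)
	{
		unsigned long flags;

		spin_lock_irqsave(&ce->guc_state.lock, flags);
		set_context_banned(ce);
		clr_context_enabled(ce);
		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
	}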
381 static inline bool context_guc_id_invalid(struct intel_context *ce) in context_guc_id_invalid() argument
383 return ce->guc_id.id == GUC_INVALID_CONTEXT_ID; in context_guc_id_invalid()
386 static inline void set_context_guc_id_invalid(struct intel_context *ce) in set_context_guc_id_invalid() argument
388 ce->guc_id.id = GUC_INVALID_CONTEXT_ID; in set_context_guc_id_invalid()
391 static inline struct intel_guc *ce_to_guc(struct intel_context *ce) in ce_to_guc() argument
393 return &ce->engine->gt->uc.guc; in ce_to_guc()
441 static u32 __get_parent_scratch_offset(struct intel_context *ce) in __get_parent_scratch_offset() argument
443 GEM_BUG_ON(!ce->parallel.guc.parent_page); in __get_parent_scratch_offset()
445 return ce->parallel.guc.parent_page * PAGE_SIZE; in __get_parent_scratch_offset()
448 static u32 __get_wq_offset(struct intel_context *ce) in __get_wq_offset() argument
452 return __get_parent_scratch_offset(ce) + WQ_OFFSET; in __get_wq_offset()
456 __get_parent_scratch(struct intel_context *ce) in __get_parent_scratch() argument
467 (ce->lrc_reg_state + in __get_parent_scratch()
468 ((__get_parent_scratch_offset(ce) - in __get_parent_scratch()
473 __get_process_desc_v69(struct intel_context *ce) in __get_process_desc_v69() argument
475 struct parent_scratch *ps = __get_parent_scratch(ce); in __get_process_desc_v69()
481 __get_wq_desc_v70(struct intel_context *ce) in __get_wq_desc_v70() argument
483 struct parent_scratch *ps = __get_parent_scratch(ce); in __get_wq_desc_v70()
488 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size) in get_wq_pointer() argument
496 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) in get_wq_pointer()
498 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head); in get_wq_pointer()
505 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)]; in get_wq_pointer()
510 struct intel_context *ce = xa_load(&guc->context_lookup, id); in __get_context() local
514 return ce; in __get_context()
572 struct intel_context *ce) in set_ctx_id_mapping() argument
581 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC); in set_ctx_id_mapping()
679 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
680 static int try_context_registration(struct intel_context *ce, bool loop);
685 struct intel_context *ce = request_to_scheduling_context(rq); in __guc_add_request() local
697 if (unlikely(!intel_context_is_schedulable(ce))) { in __guc_add_request()
699 intel_engine_signal_breadcrumbs(ce->engine); in __guc_add_request()
703 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); in __guc_add_request()
704 GEM_BUG_ON(context_guc_id_invalid(ce)); in __guc_add_request()
706 if (context_policy_required(ce)) { in __guc_add_request()
707 err = guc_context_policy_init_v70(ce, false); in __guc_add_request()
712 spin_lock(&ce->guc_state.lock); in __guc_add_request()
719 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce))) in __guc_add_request()
722 enabled = context_enabled(ce) || context_blocked(ce); in __guc_add_request()
726 action[len++] = ce->guc_id.id; in __guc_add_request()
728 set_context_pending_enable(ce); in __guc_add_request()
729 intel_context_get(ce); in __guc_add_request()
733 action[len++] = ce->guc_id.id; in __guc_add_request()
738 trace_intel_context_sched_enable(ce); in __guc_add_request()
740 set_context_enabled(ce); in __guc_add_request()
749 if (intel_context_is_parent(ce)) { in __guc_add_request()
754 clr_context_pending_enable(ce); in __guc_add_request()
755 intel_context_put(ce); in __guc_add_request()
761 spin_unlock(&ce->guc_state.lock); in __guc_add_request()
800 static u32 wq_space_until_wrap(struct intel_context *ce) in wq_space_until_wrap() argument
802 return (WQ_SIZE - ce->parallel.guc.wqi_tail); in wq_space_until_wrap()
805 static void write_wqi(struct intel_context *ce, u32 wqi_size) in write_wqi() argument
812 intel_guc_write_barrier(ce_to_guc(ce)); in write_wqi()
814 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) & in write_wqi()
816 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail); in write_wqi()
819 static int guc_wq_noop_append(struct intel_context *ce) in guc_wq_noop_append() argument
821 u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce)); in guc_wq_noop_append()
822 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1; in guc_wq_noop_append()
831 ce->parallel.guc.wqi_tail = 0; in guc_wq_noop_append()
838 struct intel_context *ce = request_to_scheduling_context(rq); in __guc_wq_item_append() local
840 unsigned int wqi_size = (ce->parallel.number_children + 4) * in __guc_wq_item_append()
847 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref)); in __guc_wq_item_append()
848 GEM_BUG_ON(context_guc_id_invalid(ce)); in __guc_wq_item_append()
849 GEM_BUG_ON(context_wait_for_deregister_to_register(ce)); in __guc_wq_item_append()
850 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)); in __guc_wq_item_append()
853 if (wqi_size > wq_space_until_wrap(ce)) { in __guc_wq_item_append()
854 ret = guc_wq_noop_append(ce); in __guc_wq_item_append()
859 wqi = get_wq_pointer(ce, wqi_size); in __guc_wq_item_append()
867 *wqi++ = ce->lrc.lrca; in __guc_wq_item_append()
868 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) | in __guc_wq_item_append()
869 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64)); in __guc_wq_item_append()
871 for_each_child(ce, child) in __guc_wq_item_append()
874 write_wqi(ce, wqi_size); in __guc_wq_item_append()
882 struct intel_context *ce = request_to_scheduling_context(rq); in guc_wq_item_append() local
885 if (unlikely(!intel_context_is_schedulable(ce))) in guc_wq_item_append()
899 struct intel_context *ce = request_to_scheduling_context(rq); in multi_lrc_submit() local
910 !intel_context_is_schedulable(ce); in multi_lrc_submit()
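The work-queue helpers above treat the parent context's scratch area as a byte-addressed ring of WQ_SIZE bytes: get_wq_pointer() checks free space with CIRC_SPACE() against the head published by the GuC, guc_wq_noop_append() pads with a NOOP entry when an item would straddle the end of the ring, and write_wqi() advances the cached tail and publishes it with WRITE_ONCE(). A sketch of the tail arithmetic only, assuming (as the masking in write_wqi() suggests) that WQ_SIZE is a power of two; the helper names here are hypothetical:

	/* Bytes left before the tail must wrap back to offset 0. */
	static u32 example_space_until_wrap(u32 wqi_tail)
	{
		return WQ_SIZE - wqi_tail;
	}

	/* Advance the tail by one work item, wrapping at WQ_SIZE. */
	static u32 example_advance_tail(u32 wqi_tail, u32 wqi_size)
	{
		return (wqi_tail + wqi_size) & (WQ_SIZE - 1);
	}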
976 struct intel_context *ce = request_to_scheduling_context(last); in guc_dequeue_one_context() local
978 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) && in guc_dequeue_one_context()
979 intel_context_is_schedulable(ce))) { in guc_dequeue_one_context()
980 ret = try_context_registration(ce, false); in guc_dequeue_one_context()
1057 static void __guc_context_destroy(struct intel_context *ce);
1058 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1059 static void guc_signal_context_fence(struct intel_context *ce);
1060 static void guc_cancel_context_requests(struct intel_context *ce);
1061 static void guc_blocked_fence_complete(struct intel_context *ce);
1065 struct intel_context *ce; in scrub_guc_desc_for_outstanding_g2h() local
1070 xa_for_each(&guc->context_lookup, index, ce) { in scrub_guc_desc_for_outstanding_g2h()
1076 bool do_put = kref_get_unless_zero(&ce->ref); in scrub_guc_desc_for_outstanding_g2h()
1080 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) && in scrub_guc_desc_for_outstanding_g2h()
1081 (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) { in scrub_guc_desc_for_outstanding_g2h()
1083 intel_context_sched_disable_unpin(ce); in scrub_guc_desc_for_outstanding_g2h()
1086 spin_lock(&ce->guc_state.lock); in scrub_guc_desc_for_outstanding_g2h()
1095 destroyed = context_destroyed(ce); in scrub_guc_desc_for_outstanding_g2h()
1096 pending_enable = context_pending_enable(ce); in scrub_guc_desc_for_outstanding_g2h()
1097 pending_disable = context_pending_disable(ce); in scrub_guc_desc_for_outstanding_g2h()
1098 deregister = context_wait_for_deregister_to_register(ce); in scrub_guc_desc_for_outstanding_g2h()
1099 banned = context_banned(ce); in scrub_guc_desc_for_outstanding_g2h()
1100 init_sched_state(ce); in scrub_guc_desc_for_outstanding_g2h()
1102 spin_unlock(&ce->guc_state.lock); in scrub_guc_desc_for_outstanding_g2h()
1107 guc_signal_context_fence(ce); in scrub_guc_desc_for_outstanding_g2h()
1110 release_guc_id(guc, ce); in scrub_guc_desc_for_outstanding_g2h()
1111 __guc_context_destroy(ce); in scrub_guc_desc_for_outstanding_g2h()
1114 intel_context_put(ce); in scrub_guc_desc_for_outstanding_g2h()
1119 guc_signal_context_fence(ce); in scrub_guc_desc_for_outstanding_g2h()
1121 guc_cancel_context_requests(ce); in scrub_guc_desc_for_outstanding_g2h()
1122 intel_engine_signal_breadcrumbs(ce->engine); in scrub_guc_desc_for_outstanding_g2h()
1124 intel_context_sched_disable_unpin(ce); in scrub_guc_desc_for_outstanding_g2h()
1127 spin_lock(&ce->guc_state.lock); in scrub_guc_desc_for_outstanding_g2h()
1128 guc_blocked_fence_complete(ce); in scrub_guc_desc_for_outstanding_g2h()
1129 spin_unlock(&ce->guc_state.lock); in scrub_guc_desc_for_outstanding_g2h()
1131 intel_context_put(ce); in scrub_guc_desc_for_outstanding_g2h()
1135 intel_context_put(ce); in scrub_guc_desc_for_outstanding_g2h()
1405 static void __guc_context_update_stats(struct intel_context *ce) in __guc_context_update_stats() argument
1407 struct intel_guc *guc = ce_to_guc(ce); in __guc_context_update_stats()
1411 lrc_update_runtime(ce); in __guc_context_update_stats()
1415 static void guc_context_update_stats(struct intel_context *ce) in guc_context_update_stats() argument
1417 if (!intel_context_pin_if_active(ce)) in guc_context_update_stats()
1420 __guc_context_update_stats(ce); in guc_context_update_stats()
1421 intel_context_unpin(ce); in guc_context_update_stats()
1430 struct intel_context *ce; in guc_timestamp_ping() local
1478 xa_for_each(&guc->context_lookup, index, ce) in guc_timestamp_ping()
1479 guc_context_update_stats(ce); in guc_timestamp_ping()
1653 __context_to_physical_engine(struct intel_context *ce) in __context_to_physical_engine() argument
1655 struct intel_engine_cs *engine = ce->engine; in __context_to_physical_engine()
1663 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub) in guc_reset_state() argument
1665 struct intel_engine_cs *engine = __context_to_physical_engine(ce); in guc_reset_state()
1667 if (!intel_context_is_schedulable(ce)) in guc_reset_state()
1670 GEM_BUG_ON(!intel_context_is_pinned(ce)); in guc_reset_state()
1681 lrc_init_regs(ce, engine, true); in guc_reset_state()
1684 lrc_update_regs(ce, engine, head); in guc_reset_state()
1710 __unwind_incomplete_requests(struct intel_context *ce) in __unwind_incomplete_requests() argument
1716 ce->engine->sched_engine; in __unwind_incomplete_requests()
1720 spin_lock(&ce->guc_state.lock); in __unwind_incomplete_requests()
1722 &ce->guc_state.requests, in __unwind_incomplete_requests()
1741 spin_unlock(&ce->guc_state.lock); in __unwind_incomplete_requests()
1745 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled) in __guc_reset_context() argument
1751 int i, number_children = ce->parallel.number_children; in __guc_reset_context()
1752 struct intel_context *parent = ce; in __guc_reset_context()
1754 GEM_BUG_ON(intel_context_is_child(ce)); in __guc_reset_context()
1756 intel_context_get(ce); in __guc_reset_context()
1763 spin_lock_irqsave(&ce->guc_state.lock, flags); in __guc_reset_context()
1764 clr_context_enabled(ce); in __guc_reset_context()
1765 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in __guc_reset_context()
1772 if (!intel_context_is_pinned(ce)) in __guc_reset_context()
1776 rq = intel_context_get_active_request(ce); in __guc_reset_context()
1778 head = ce->ring->tail; in __guc_reset_context()
1783 guilty = stalled & ce->engine->mask; in __guc_reset_context()
1785 GEM_BUG_ON(i915_active_is_idle(&ce->active)); in __guc_reset_context()
1786 head = intel_ring_wrap(ce->ring, rq->head); in __guc_reset_context()
1791 guc_reset_state(ce, head, guilty); in __guc_reset_context()
1794 ce = list_next_entry(ce, parallel.child_link); in __guc_reset_context()
1803 struct intel_context *ce; in intel_guc_submission_reset() local
1813 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_reset()
1814 if (!kref_get_unless_zero(&ce->ref)) in intel_guc_submission_reset()
1819 if (intel_context_is_pinned(ce) && in intel_guc_submission_reset()
1820 !intel_context_is_child(ce)) in intel_guc_submission_reset()
1821 __guc_reset_context(ce, stalled); in intel_guc_submission_reset()
1823 intel_context_put(ce); in intel_guc_submission_reset()
1833 static void guc_cancel_context_requests(struct intel_context *ce) in guc_cancel_context_requests() argument
1835 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine; in guc_cancel_context_requests()
1841 spin_lock(&ce->guc_state.lock); in guc_cancel_context_requests()
1842 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) in guc_cancel_context_requests()
1844 spin_unlock(&ce->guc_state.lock); in guc_cancel_context_requests()
1901 struct intel_context *ce; in intel_guc_submission_cancel_requests() local
1906 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_cancel_requests()
1907 if (!kref_get_unless_zero(&ce->ref)) in intel_guc_submission_cancel_requests()
1912 if (intel_context_is_pinned(ce) && in intel_guc_submission_cancel_requests()
1913 !intel_context_is_child(ce)) in intel_guc_submission_cancel_requests()
1914 guc_cancel_context_requests(ce); in intel_guc_submission_cancel_requests()
1916 intel_context_put(ce); in intel_guc_submission_cancel_requests()
2042 struct intel_context *ce = request_to_scheduling_context(rq); in need_tasklet() local
2046 !ctx_id_mapped(guc, ce->guc_id.id); in need_tasklet()
2066 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce) in new_guc_id() argument
2070 GEM_BUG_ON(intel_context_is_child(ce)); in new_guc_id()
2072 if (intel_context_is_parent(ce)) in new_guc_id()
2075 order_base_2(ce->parallel.number_children in new_guc_id()
2086 if (!intel_context_is_parent(ce)) in new_guc_id()
2089 ce->guc_id.id = ret; in new_guc_id()
2093 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce) in __release_guc_id() argument
2095 GEM_BUG_ON(intel_context_is_child(ce)); in __release_guc_id()
2097 if (!context_guc_id_invalid(ce)) { in __release_guc_id()
2098 if (intel_context_is_parent(ce)) { in __release_guc_id()
2100 ce->guc_id.id, in __release_guc_id()
2101 order_base_2(ce->parallel.number_children in __release_guc_id()
2106 ce->guc_id.id); in __release_guc_id()
2108 clr_ctx_id_mapping(guc, ce->guc_id.id); in __release_guc_id()
2109 set_context_guc_id_invalid(ce); in __release_guc_id()
2111 if (!list_empty(&ce->guc_id.link)) in __release_guc_id()
2112 list_del_init(&ce->guc_id.link); in __release_guc_id()
2115 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce) in release_guc_id() argument
2120 __release_guc_id(guc, ce); in release_guc_id()
2124 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce) in steal_guc_id() argument
2129 GEM_BUG_ON(intel_context_is_child(ce)); in steal_guc_id()
2130 GEM_BUG_ON(intel_context_is_parent(ce)); in steal_guc_id()
2143 ce->guc_id.id = cn->guc_id.id; in steal_guc_id()
2161 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce) in assign_guc_id() argument
2166 GEM_BUG_ON(intel_context_is_child(ce)); in assign_guc_id()
2168 ret = new_guc_id(guc, ce); in assign_guc_id()
2170 if (intel_context_is_parent(ce)) in assign_guc_id()
2173 ret = steal_guc_id(guc, ce); in assign_guc_id()
2178 if (intel_context_is_parent(ce)) { in assign_guc_id()
2182 for_each_child(ce, child) in assign_guc_id()
2183 child->guc_id.id = ce->guc_id.id + i++; in assign_guc_id()
2190 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce) in pin_guc_id() argument
2195 GEM_BUG_ON(atomic_read(&ce->guc_id.ref)); in pin_guc_id()
2200 might_lock(&ce->guc_state.lock); in pin_guc_id()
2202 if (context_guc_id_invalid(ce)) { in pin_guc_id()
2203 ret = assign_guc_id(guc, ce); in pin_guc_id()
2208 if (!list_empty(&ce->guc_id.link)) in pin_guc_id()
2209 list_del_init(&ce->guc_id.link); in pin_guc_id()
2210 atomic_inc(&ce->guc_id.ref); in pin_guc_id()
2226 ce->engine->props.timeslice_duration_ms << in pin_guc_id()
2240 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce) in unpin_guc_id() argument
2244 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0); in unpin_guc_id()
2245 GEM_BUG_ON(intel_context_is_child(ce)); in unpin_guc_id()
2247 if (unlikely(context_guc_id_invalid(ce) || in unpin_guc_id()
2248 intel_context_is_parent(ce))) in unpin_guc_id()
2252 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) && in unpin_guc_id()
2253 !atomic_read(&ce->guc_id.ref)) in unpin_guc_id()
2254 list_add_tail(&ce->guc_id.link, in unpin_guc_id()
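pin_guc_id() above assigns a guc_id on first use, falling back to steal_guc_id() when the pool is exhausted, and takes a reference on it; unpin_guc_id() drops the reference and, once it hits zero, parks the still-mapped id on a list so it can be stolen later. guc_request_alloc() further down pairs this with a lock-free fast path. A condensed sketch of that fast/slow path, not taken from this file (real retry and error handling omitted; example_get_guc_id is a hypothetical wrapper):

	static int example_get_guc_id(struct intel_guc *guc, struct intel_context *ce)
	{
		/* Fast path: the context already holds a guc_id, take a ref. */
		if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
			return 0;

		/* Slow path: assign or steal an id and pin it;
		 * returns 1 if a new guc_id was assigned. */
		return pin_guc_id(guc, ce);
	}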
2260 struct intel_context *ce, in __guc_action_register_multi_lrc_v69() argument
2269 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); in __guc_action_register_multi_lrc_v69()
2273 action[len++] = ce->parallel.number_children + 1; in __guc_action_register_multi_lrc_v69()
2275 for_each_child(ce, child) { in __guc_action_register_multi_lrc_v69()
2284 struct intel_context *ce, in __guc_action_register_multi_lrc_v70() argument
2293 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE); in __guc_action_register_multi_lrc_v70()
2305 action[len++] = ce->parallel.number_children + 1; in __guc_action_register_multi_lrc_v70()
2310 for_each_child(ce, child) { in __guc_action_register_multi_lrc_v70()
2364 static void prepare_context_registration_info_v69(struct intel_context *ce);
2365 static void prepare_context_registration_info_v70(struct intel_context *ce,
2369 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop) in register_context_v69() argument
2372 ce->guc_id.id * sizeof(struct guc_lrc_desc_v69); in register_context_v69()
2374 prepare_context_registration_info_v69(ce); in register_context_v69()
2376 if (intel_context_is_parent(ce)) in register_context_v69()
2377 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id, in register_context_v69()
2380 return __guc_action_register_context_v69(guc, ce->guc_id.id, in register_context_v69()
2385 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop) in register_context_v70() argument
2389 prepare_context_registration_info_v70(ce, &info); in register_context_v70()
2391 if (intel_context_is_parent(ce)) in register_context_v70()
2392 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop); in register_context_v70()
2397 static int register_context(struct intel_context *ce, bool loop) in register_context() argument
2399 struct intel_guc *guc = ce_to_guc(ce); in register_context()
2402 GEM_BUG_ON(intel_context_is_child(ce)); in register_context()
2403 trace_intel_context_register(ce); in register_context()
2406 ret = register_context_v70(guc, ce, loop); in register_context()
2408 ret = register_context_v69(guc, ce, loop); in register_context()
2413 spin_lock_irqsave(&ce->guc_state.lock, flags); in register_context()
2414 set_context_registered(ce); in register_context()
2415 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in register_context()
2418 guc_context_policy_init_v70(ce, loop); in register_context()
2437 static int deregister_context(struct intel_context *ce, u32 guc_id) in deregister_context() argument
2439 struct intel_guc *guc = ce_to_guc(ce); in deregister_context()
2441 GEM_BUG_ON(intel_context_is_child(ce)); in deregister_context()
2442 trace_intel_context_deregister(ce); in deregister_context()
2447 static inline void clear_children_join_go_memory(struct intel_context *ce) in clear_children_join_go_memory() argument
2449 struct parent_scratch *ps = __get_parent_scratch(ce); in clear_children_join_go_memory()
2453 for (i = 0; i < ce->parallel.number_children + 1; ++i) in clear_children_join_go_memory()
2457 static inline u32 get_children_go_value(struct intel_context *ce) in get_children_go_value() argument
2459 return __get_parent_scratch(ce)->go.semaphore; in get_children_go_value()
2462 static inline u32 get_children_join_value(struct intel_context *ce, in get_children_join_value() argument
2465 return __get_parent_scratch(ce)->join[child_index].semaphore; in get_children_join_value()
2515 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop) in guc_context_policy_init_v70() argument
2517 struct intel_engine_cs *engine = ce->engine; in guc_context_policy_init_v70()
2533 __guc_context_policy_start_klv(&policy, ce->guc_id.id); in guc_context_policy_init_v70()
2535 __guc_context_policy_add_priority(&policy, ce->guc_state.prio); in guc_context_policy_init_v70()
2544 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_policy_init_v70()
2546 set_context_policy_required(ce); in guc_context_policy_init_v70()
2548 clr_context_policy_required(ce); in guc_context_policy_init_v70()
2549 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_policy_init_v70()
2591 static void prepare_context_registration_info_v69(struct intel_context *ce) in prepare_context_registration_info_v69() argument
2593 struct intel_engine_cs *engine = ce->engine; in prepare_context_registration_info_v69()
2595 u32 ctx_id = ce->guc_id.id; in prepare_context_registration_info_v69()
2606 i915_gem_object_is_lmem(ce->ring->vma->obj)); in prepare_context_registration_info_v69()
2612 desc->hw_context_desc = ce->lrc.lrca; in prepare_context_registration_info_v69()
2613 desc->priority = ce->guc_state.prio; in prepare_context_registration_info_v69()
2621 if (intel_context_is_parent(ce)) { in prepare_context_registration_info_v69()
2624 ce->parallel.guc.wqi_tail = 0; in prepare_context_registration_info_v69()
2625 ce->parallel.guc.wqi_head = 0; in prepare_context_registration_info_v69()
2627 desc->process_desc = i915_ggtt_offset(ce->state) + in prepare_context_registration_info_v69()
2628 __get_parent_scratch_offset(ce); in prepare_context_registration_info_v69()
2629 desc->wq_addr = i915_ggtt_offset(ce->state) + in prepare_context_registration_info_v69()
2630 __get_wq_offset(ce); in prepare_context_registration_info_v69()
2633 pdesc = __get_process_desc_v69(ce); in prepare_context_registration_info_v69()
2635 pdesc->stage_id = ce->guc_id.id; in prepare_context_registration_info_v69()
2640 ce->parallel.guc.wq_head = &pdesc->head; in prepare_context_registration_info_v69()
2641 ce->parallel.guc.wq_tail = &pdesc->tail; in prepare_context_registration_info_v69()
2642 ce->parallel.guc.wq_status = &pdesc->wq_status; in prepare_context_registration_info_v69()
2644 for_each_child(ce, child) { in prepare_context_registration_info_v69()
2650 desc->priority = ce->guc_state.prio; in prepare_context_registration_info_v69()
2655 clear_children_join_go_memory(ce); in prepare_context_registration_info_v69()
2659 static void prepare_context_registration_info_v70(struct intel_context *ce, in prepare_context_registration_info_v70() argument
2662 struct intel_engine_cs *engine = ce->engine; in prepare_context_registration_info_v70()
2664 u32 ctx_id = ce->guc_id.id; in prepare_context_registration_info_v70()
2673 i915_gem_object_is_lmem(ce->ring->vma->obj)); in prepare_context_registration_info_v70()
2683 info->hwlrca_lo = lower_32_bits(ce->lrc.lrca); in prepare_context_registration_info_v70()
2684 info->hwlrca_hi = upper_32_bits(ce->lrc.lrca); in prepare_context_registration_info_v70()
2686 info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio); in prepare_context_registration_info_v70()
2693 if (intel_context_is_parent(ce)) { in prepare_context_registration_info_v70()
2697 ce->parallel.guc.wqi_tail = 0; in prepare_context_registration_info_v70()
2698 ce->parallel.guc.wqi_head = 0; in prepare_context_registration_info_v70()
2700 wq_desc_offset = i915_ggtt_offset(ce->state) + in prepare_context_registration_info_v70()
2701 __get_parent_scratch_offset(ce); in prepare_context_registration_info_v70()
2702 wq_base_offset = i915_ggtt_offset(ce->state) + in prepare_context_registration_info_v70()
2703 __get_wq_offset(ce); in prepare_context_registration_info_v70()
2710 wq_desc = __get_wq_desc_v70(ce); in prepare_context_registration_info_v70()
2714 ce->parallel.guc.wq_head = &wq_desc->head; in prepare_context_registration_info_v70()
2715 ce->parallel.guc.wq_tail = &wq_desc->tail; in prepare_context_registration_info_v70()
2716 ce->parallel.guc.wq_status = &wq_desc->wq_status; in prepare_context_registration_info_v70()
2718 clear_children_join_go_memory(ce); in prepare_context_registration_info_v70()
2722 static int try_context_registration(struct intel_context *ce, bool loop) in try_context_registration() argument
2724 struct intel_engine_cs *engine = ce->engine; in try_context_registration()
2728 u32 ctx_id = ce->guc_id.id; in try_context_registration()
2732 GEM_BUG_ON(!sched_state_is_init(ce)); in try_context_registration()
2737 set_ctx_id_mapping(guc, ctx_id, ce); in try_context_registration()
2751 trace_intel_context_steal_guc_id(ce); in try_context_registration()
2755 spin_lock_irqsave(&ce->guc_state.lock, flags); in try_context_registration()
2758 set_context_wait_for_deregister_to_register(ce); in try_context_registration()
2759 intel_context_get(ce); in try_context_registration()
2761 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in try_context_registration()
2772 ret = deregister_context(ce, ce->guc_id.id); in try_context_registration()
2777 ret = register_context(ce, loop); in try_context_registration()
2789 static int __guc_context_pre_pin(struct intel_context *ce, in __guc_context_pre_pin() argument
2794 return lrc_pre_pin(ce, engine, ww, vaddr); in __guc_context_pre_pin()
2797 static int __guc_context_pin(struct intel_context *ce, in __guc_context_pin() argument
2801 if (i915_ggtt_offset(ce->state) != in __guc_context_pin()
2802 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) in __guc_context_pin()
2803 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags); in __guc_context_pin()
2810 return lrc_pin(ce, engine, vaddr); in __guc_context_pin()
2813 static int guc_context_pre_pin(struct intel_context *ce, in guc_context_pre_pin() argument
2817 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr); in guc_context_pre_pin()
2820 static int guc_context_pin(struct intel_context *ce, void *vaddr) in guc_context_pin() argument
2822 int ret = __guc_context_pin(ce, ce->engine, vaddr); in guc_context_pin()
2824 if (likely(!ret && !intel_context_is_barrier(ce))) in guc_context_pin()
2825 intel_engine_pm_get(ce->engine); in guc_context_pin()
2830 static void guc_context_unpin(struct intel_context *ce) in guc_context_unpin() argument
2832 struct intel_guc *guc = ce_to_guc(ce); in guc_context_unpin()
2834 __guc_context_update_stats(ce); in guc_context_unpin()
2835 unpin_guc_id(guc, ce); in guc_context_unpin()
2836 lrc_unpin(ce); in guc_context_unpin()
2838 if (likely(!intel_context_is_barrier(ce))) in guc_context_unpin()
2839 intel_engine_pm_put_async(ce->engine); in guc_context_unpin()
2842 static void guc_context_post_unpin(struct intel_context *ce) in guc_context_post_unpin() argument
2844 lrc_post_unpin(ce); in guc_context_post_unpin()
2848 struct intel_context *ce) in __guc_context_sched_enable() argument
2852 ce->guc_id.id, in __guc_context_sched_enable()
2856 trace_intel_context_sched_enable(ce); in __guc_context_sched_enable()
2863 struct intel_context *ce, in __guc_context_sched_disable() argument
2874 GEM_BUG_ON(intel_context_is_child(ce)); in __guc_context_sched_disable()
2875 trace_intel_context_sched_disable(ce); in __guc_context_sched_disable()
2881 static void guc_blocked_fence_complete(struct intel_context *ce) in guc_blocked_fence_complete() argument
2883 lockdep_assert_held(&ce->guc_state.lock); in guc_blocked_fence_complete()
2885 if (!i915_sw_fence_done(&ce->guc_state.blocked)) in guc_blocked_fence_complete()
2886 i915_sw_fence_complete(&ce->guc_state.blocked); in guc_blocked_fence_complete()
2889 static void guc_blocked_fence_reinit(struct intel_context *ce) in guc_blocked_fence_reinit() argument
2891 lockdep_assert_held(&ce->guc_state.lock); in guc_blocked_fence_reinit()
2892 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked)); in guc_blocked_fence_reinit()
2899 i915_sw_fence_fini(&ce->guc_state.blocked); in guc_blocked_fence_reinit()
2900 i915_sw_fence_reinit(&ce->guc_state.blocked); in guc_blocked_fence_reinit()
2901 i915_sw_fence_await(&ce->guc_state.blocked); in guc_blocked_fence_reinit()
2902 i915_sw_fence_commit(&ce->guc_state.blocked); in guc_blocked_fence_reinit()
2905 static u16 prep_context_pending_disable(struct intel_context *ce) in prep_context_pending_disable() argument
2907 lockdep_assert_held(&ce->guc_state.lock); in prep_context_pending_disable()
2909 set_context_pending_disable(ce); in prep_context_pending_disable()
2910 clr_context_enabled(ce); in prep_context_pending_disable()
2911 guc_blocked_fence_reinit(ce); in prep_context_pending_disable()
2912 intel_context_get(ce); in prep_context_pending_disable()
2914 return ce->guc_id.id; in prep_context_pending_disable()
2917 static struct i915_sw_fence *guc_context_block(struct intel_context *ce) in guc_context_block() argument
2919 struct intel_guc *guc = ce_to_guc(ce); in guc_context_block()
2921 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; in guc_context_block()
2926 GEM_BUG_ON(intel_context_is_child(ce)); in guc_context_block()
2928 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_block()
2930 incr_context_blocked(ce); in guc_context_block()
2932 enabled = context_enabled(ce); in guc_context_block()
2935 clr_context_enabled(ce); in guc_context_block()
2936 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_block()
2937 return &ce->guc_state.blocked; in guc_context_block()
2944 atomic_add(2, &ce->pin_count); in guc_context_block()
2946 guc_id = prep_context_pending_disable(ce); in guc_context_block()
2948 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_block()
2951 __guc_context_sched_disable(guc, ce, guc_id); in guc_context_block()
2953 return &ce->guc_state.blocked; in guc_context_block()
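guc_context_block() above bumps the blocked count, clears the enabled bit and, when a disable is actually needed, sends a schedule-disable to the GuC; either way it hands back &ce->guc_state.blocked, an i915_sw_fence that prep_context_pending_disable() re-arms (via guc_blocked_fence_reinit()) and guc_blocked_fence_complete() signals once the disable is acknowledged, as in the pending_disable branch of scrub_guc_desc_for_outstanding_g2h() above. A caller-side sketch, with the wait itself left as a comment since the exact wait primitive is not shown in this listing (example_block_context is a hypothetical name):

	static void example_block_context(struct intel_context *ce)
	{
		struct i915_sw_fence *blocked = guc_context_block(ce);

		/*
		 * Wait for *blocked to signal before touching state that
		 * requires the context to be off the hardware, then:
		 */
		guc_context_unblock(ce);
	}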
2963 static bool context_cant_unblock(struct intel_context *ce) in context_cant_unblock() argument
2965 lockdep_assert_held(&ce->guc_state.lock); in context_cant_unblock()
2967 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) || in context_cant_unblock()
2968 context_guc_id_invalid(ce) || in context_cant_unblock()
2969 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) || in context_cant_unblock()
2970 !intel_context_is_pinned(ce); in context_cant_unblock()
2973 static void guc_context_unblock(struct intel_context *ce) in guc_context_unblock() argument
2975 struct intel_guc *guc = ce_to_guc(ce); in guc_context_unblock()
2977 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm; in guc_context_unblock()
2981 GEM_BUG_ON(context_enabled(ce)); in guc_context_unblock()
2982 GEM_BUG_ON(intel_context_is_child(ce)); in guc_context_unblock()
2984 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_unblock()
2987 context_cant_unblock(ce))) { in guc_context_unblock()
2991 set_context_pending_enable(ce); in guc_context_unblock()
2992 set_context_enabled(ce); in guc_context_unblock()
2993 intel_context_get(ce); in guc_context_unblock()
2996 decr_context_blocked(ce); in guc_context_unblock()
2998 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_unblock()
3002 __guc_context_sched_enable(guc, ce); in guc_context_unblock()
3006 static void guc_context_cancel_request(struct intel_context *ce, in guc_context_cancel_request() argument
3015 intel_context_get(ce); in guc_context_cancel_request()
3020 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head), in guc_context_cancel_request()
3025 intel_context_put(ce); in guc_context_cancel_request()
3051 guc_context_revoke(struct intel_context *ce, struct i915_request *rq, in guc_context_revoke() argument
3054 struct intel_guc *guc = ce_to_guc(ce); in guc_context_revoke()
3056 &ce->engine->gt->i915->runtime_pm; in guc_context_revoke()
3060 GEM_BUG_ON(intel_context_is_child(ce)); in guc_context_revoke()
3064 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_revoke()
3065 set_context_banned(ce); in guc_context_revoke()
3068 (!context_enabled(ce) && !context_pending_disable(ce))) { in guc_context_revoke()
3069 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_revoke()
3071 guc_cancel_context_requests(ce); in guc_context_revoke()
3072 intel_engine_signal_breadcrumbs(ce->engine); in guc_context_revoke()
3073 } else if (!context_pending_disable(ce)) { in guc_context_revoke()
3080 atomic_add(2, &ce->pin_count); in guc_context_revoke()
3082 guc_id = prep_context_pending_disable(ce); in guc_context_revoke()
3083 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_revoke()
3093 __guc_context_sched_disable(guc, ce, guc_id); in guc_context_revoke()
3096 if (!context_guc_id_invalid(ce)) in guc_context_revoke()
3099 ce->guc_id.id, in guc_context_revoke()
3101 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_revoke()
3105 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce, in do_sched_disable() argument
3107 __releases(ce->guc_state.lock) in do_sched_disable()
3109 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm; in do_sched_disable()
3113 lockdep_assert_held(&ce->guc_state.lock); in do_sched_disable()
3114 guc_id = prep_context_pending_disable(ce); in do_sched_disable()
3116 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in do_sched_disable()
3119 __guc_context_sched_disable(guc, ce, guc_id); in do_sched_disable()
3123 struct intel_context *ce) in bypass_sched_disable() argument
3125 lockdep_assert_held(&ce->guc_state.lock); in bypass_sched_disable()
3126 GEM_BUG_ON(intel_context_is_child(ce)); in bypass_sched_disable()
3128 if (submission_disabled(guc) || context_guc_id_invalid(ce) || in bypass_sched_disable()
3129 !ctx_id_mapped(guc, ce->guc_id.id)) { in bypass_sched_disable()
3130 clr_context_enabled(ce); in bypass_sched_disable()
3134 return !context_enabled(ce); in bypass_sched_disable()
3139 struct intel_context *ce = in __delay_sched_disable() local
3140 container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work); in __delay_sched_disable()
3141 struct intel_guc *guc = ce_to_guc(ce); in __delay_sched_disable()
3144 spin_lock_irqsave(&ce->guc_state.lock, flags); in __delay_sched_disable()
3146 if (bypass_sched_disable(guc, ce)) { in __delay_sched_disable()
3147 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in __delay_sched_disable()
3148 intel_context_sched_disable_unpin(ce); in __delay_sched_disable()
3150 do_sched_disable(guc, ce, flags); in __delay_sched_disable()
3154 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce) in guc_id_pressure() argument
3160 if (intel_context_is_parent(ce)) in guc_id_pressure()
3170 static void guc_context_sched_disable(struct intel_context *ce) in guc_context_sched_disable() argument
3172 struct intel_guc *guc = ce_to_guc(ce); in guc_context_sched_disable()
3176 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_sched_disable()
3178 if (bypass_sched_disable(guc, ce)) { in guc_context_sched_disable()
3179 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_sched_disable()
3180 intel_context_sched_disable_unpin(ce); in guc_context_sched_disable()
3181 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) && in guc_context_sched_disable()
3183 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_sched_disable()
3185 &ce->guc_state.sched_disable_delay_work, in guc_context_sched_disable()
3188 do_sched_disable(guc, ce, flags); in guc_context_sched_disable()
3192 static void guc_context_close(struct intel_context *ce) in guc_context_close() argument
3196 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) && in guc_context_close()
3197 cancel_delayed_work(&ce->guc_state.sched_disable_delay_work)) in guc_context_close()
3198 __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work); in guc_context_close()
3200 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_context_close()
3201 set_context_close_done(ce); in guc_context_close()
3202 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_context_close()
3205 static inline void guc_lrc_desc_unpin(struct intel_context *ce) in guc_lrc_desc_unpin() argument
3207 struct intel_guc *guc = ce_to_guc(ce); in guc_lrc_desc_unpin()
3213 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id)); in guc_lrc_desc_unpin()
3214 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id)); in guc_lrc_desc_unpin()
3215 GEM_BUG_ON(context_enabled(ce)); in guc_lrc_desc_unpin()
3218 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_lrc_desc_unpin()
3222 set_context_destroyed(ce); in guc_lrc_desc_unpin()
3223 clr_context_registered(ce); in guc_lrc_desc_unpin()
3225 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_lrc_desc_unpin()
3227 release_guc_id(guc, ce); in guc_lrc_desc_unpin()
3228 __guc_context_destroy(ce); in guc_lrc_desc_unpin()
3232 deregister_context(ce, ce->guc_id.id); in guc_lrc_desc_unpin()
3235 static void __guc_context_destroy(struct intel_context *ce) in __guc_context_destroy() argument
3237 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] || in __guc_context_destroy()
3238 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] || in __guc_context_destroy()
3239 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] || in __guc_context_destroy()
3240 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]); in __guc_context_destroy()
3242 lrc_fini(ce); in __guc_context_destroy()
3243 intel_context_fini(ce); in __guc_context_destroy()
3245 if (intel_engine_is_virtual(ce->engine)) { in __guc_context_destroy()
3247 container_of(ce, typeof(*ve), context); in __guc_context_destroy()
3254 intel_context_free(ce); in __guc_context_destroy()
3260 struct intel_context *ce; in guc_flush_destroyed_contexts() local
3268 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, in guc_flush_destroyed_contexts()
3271 if (ce) in guc_flush_destroyed_contexts()
3272 list_del_init(&ce->destroyed_link); in guc_flush_destroyed_contexts()
3275 if (!ce) in guc_flush_destroyed_contexts()
3278 release_guc_id(guc, ce); in guc_flush_destroyed_contexts()
3279 __guc_context_destroy(ce); in guc_flush_destroyed_contexts()
3285 struct intel_context *ce; in deregister_destroyed_contexts() local
3290 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts, in deregister_destroyed_contexts()
3293 if (ce) in deregister_destroyed_contexts()
3294 list_del_init(&ce->destroyed_link); in deregister_destroyed_contexts()
3297 if (!ce) in deregister_destroyed_contexts()
3300 guc_lrc_desc_unpin(ce); in deregister_destroyed_contexts()
3317 struct intel_context *ce = container_of(kref, typeof(*ce), ref); in guc_context_destroy() local
3318 struct intel_guc *guc = ce_to_guc(ce); in guc_context_destroy()
3328 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) || in guc_context_destroy()
3329 !ctx_id_mapped(guc, ce->guc_id.id); in guc_context_destroy()
3331 if (!list_empty(&ce->guc_id.link)) in guc_context_destroy()
3332 list_del_init(&ce->guc_id.link); in guc_context_destroy()
3333 list_add_tail(&ce->destroyed_link, in guc_context_destroy()
3336 __release_guc_id(guc, ce); in guc_context_destroy()
3340 __guc_context_destroy(ce); in guc_context_destroy()
3352 static int guc_context_alloc(struct intel_context *ce) in guc_context_alloc() argument
3354 return lrc_alloc(ce, ce->engine); in guc_context_alloc()
3358 struct intel_context *ce) in __guc_context_set_prio() argument
3363 __guc_context_policy_start_klv(&policy, ce->guc_id.id); in __guc_context_set_prio()
3364 __guc_context_policy_add_priority(&policy, ce->guc_state.prio); in __guc_context_set_prio()
3369 ce->guc_id.id, in __guc_context_set_prio()
3370 ce->guc_state.prio, in __guc_context_set_prio()
3378 struct intel_context *ce, in guc_context_set_prio() argument
3383 lockdep_assert_held(&ce->guc_state.lock); in guc_context_set_prio()
3385 if (ce->guc_state.prio == prio || submission_disabled(guc) || in guc_context_set_prio()
3386 !context_registered(ce)) { in guc_context_set_prio()
3387 ce->guc_state.prio = prio; in guc_context_set_prio()
3391 ce->guc_state.prio = prio; in guc_context_set_prio()
3392 __guc_context_set_prio(guc, ce); in guc_context_set_prio()
3394 trace_intel_context_set_prio(ce); in guc_context_set_prio()
3409 static inline void add_context_inflight_prio(struct intel_context *ce, in add_context_inflight_prio() argument
3412 lockdep_assert_held(&ce->guc_state.lock); in add_context_inflight_prio()
3413 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); in add_context_inflight_prio()
3415 ++ce->guc_state.prio_count[guc_prio]; in add_context_inflight_prio()
3418 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); in add_context_inflight_prio()
3421 static inline void sub_context_inflight_prio(struct intel_context *ce, in sub_context_inflight_prio() argument
3424 lockdep_assert_held(&ce->guc_state.lock); in sub_context_inflight_prio()
3425 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count)); in sub_context_inflight_prio()
3428 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]); in sub_context_inflight_prio()
3430 --ce->guc_state.prio_count[guc_prio]; in sub_context_inflight_prio()
3433 static inline void update_context_prio(struct intel_context *ce) in update_context_prio() argument
3435 struct intel_guc *guc = &ce->engine->gt->uc.guc; in update_context_prio()
3441 lockdep_assert_held(&ce->guc_state.lock); in update_context_prio()
3443 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) { in update_context_prio()
3444 if (ce->guc_state.prio_count[i]) { in update_context_prio()
3445 guc_context_set_prio(guc, ce, i); in update_context_prio()
3459 struct intel_context *ce = request_to_scheduling_context(rq); in add_to_context() local
3462 GEM_BUG_ON(intel_context_is_child(ce)); in add_to_context()
3465 spin_lock(&ce->guc_state.lock); in add_to_context()
3466 list_move_tail(&rq->sched.link, &ce->guc_state.requests); in add_to_context()
3470 add_context_inflight_prio(ce, rq->guc_prio); in add_to_context()
3472 sub_context_inflight_prio(ce, rq->guc_prio); in add_to_context()
3474 add_context_inflight_prio(ce, rq->guc_prio); in add_to_context()
3476 update_context_prio(ce); in add_to_context()
3478 spin_unlock(&ce->guc_state.lock); in add_to_context()
3481 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce) in guc_prio_fini() argument
3483 lockdep_assert_held(&ce->guc_state.lock); in guc_prio_fini()
3487 sub_context_inflight_prio(ce, rq->guc_prio); in guc_prio_fini()
3488 update_context_prio(ce); in guc_prio_fini()
3495 struct intel_context *ce = request_to_scheduling_context(rq); in remove_from_context() local
3497 GEM_BUG_ON(intel_context_is_child(ce)); in remove_from_context()
3499 spin_lock_irq(&ce->guc_state.lock); in remove_from_context()
3507 guc_prio_fini(rq, ce); in remove_from_context()
3509 spin_unlock_irq(&ce->guc_state.lock); in remove_from_context()
3511 atomic_dec(&ce->guc_id.ref); in remove_from_context()
3552 static void __guc_signal_context_fence(struct intel_context *ce) in __guc_signal_context_fence() argument
3556 lockdep_assert_held(&ce->guc_state.lock); in __guc_signal_context_fence()
3558 if (!list_empty(&ce->guc_state.fences)) in __guc_signal_context_fence()
3559 trace_intel_context_fence_release(ce); in __guc_signal_context_fence()
3565 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences, in __guc_signal_context_fence()
3571 INIT_LIST_HEAD(&ce->guc_state.fences); in __guc_signal_context_fence()
3574 static void guc_signal_context_fence(struct intel_context *ce) in guc_signal_context_fence() argument
3578 GEM_BUG_ON(intel_context_is_child(ce)); in guc_signal_context_fence()
3580 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_signal_context_fence()
3581 clr_context_wait_for_deregister_to_register(ce); in guc_signal_context_fence()
3582 __guc_signal_context_fence(ce); in guc_signal_context_fence()
3583 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_signal_context_fence()
3586 static bool context_needs_register(struct intel_context *ce, bool new_guc_id) in context_needs_register() argument
3588 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) || in context_needs_register()
3589 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) && in context_needs_register()
3590 !submission_disabled(ce_to_guc(ce)); in context_needs_register()
3593 static void guc_context_init(struct intel_context *ce) in guc_context_init() argument
3599 ctx = rcu_dereference(ce->gem_context); in guc_context_init()
3604 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio); in guc_context_init()
3606 INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work, in guc_context_init()
3609 set_bit(CONTEXT_GUC_INIT, &ce->flags); in guc_context_init()
3614 struct intel_context *ce = request_to_scheduling_context(rq); in guc_request_alloc() local
3615 struct intel_guc *guc = ce_to_guc(ce); in guc_request_alloc()
3643 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags))) in guc_request_alloc()
3644 guc_context_init(ce); in guc_request_alloc()
3660 if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work)) in guc_request_alloc()
3661 intel_context_sched_disable_unpin(ce); in guc_request_alloc()
3662 else if (intel_context_is_closed(ce)) in guc_request_alloc()
3663 if (wait_for(context_close_done(ce), 1500)) in guc_request_alloc()
3682 if (atomic_add_unless(&ce->guc_id.ref, 1, 0)) in guc_request_alloc()
3685 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */ in guc_request_alloc()
3688 if (context_needs_register(ce, !!ret)) { in guc_request_alloc()
3689 ret = try_context_registration(ce, true); in guc_request_alloc()
3695 atomic_dec(&ce->guc_id.ref); in guc_request_alloc()
3696 unpin_guc_id(guc, ce); in guc_request_alloc()
3701 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags); in guc_request_alloc()
3711 spin_lock_irqsave(&ce->guc_state.lock, flags); in guc_request_alloc()
3712 if (context_wait_for_deregister_to_register(ce) || in guc_request_alloc()
3713 context_pending_disable(ce)) { in guc_request_alloc()
3717 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences); in guc_request_alloc()
3719 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in guc_request_alloc()
3724 static int guc_virtual_context_pre_pin(struct intel_context *ce, in guc_virtual_context_pre_pin() argument
3728 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); in guc_virtual_context_pre_pin()
3730 return __guc_context_pre_pin(ce, engine, ww, vaddr); in guc_virtual_context_pre_pin()
3733 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr) in guc_virtual_context_pin() argument
3735 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); in guc_virtual_context_pin()
3736 int ret = __guc_context_pin(ce, engine, vaddr); in guc_virtual_context_pin()
3737 intel_engine_mask_t tmp, mask = ce->engine->mask; in guc_virtual_context_pin()
3740 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) in guc_virtual_context_pin()
3746 static void guc_virtual_context_unpin(struct intel_context *ce) in guc_virtual_context_unpin() argument
3748 intel_engine_mask_t tmp, mask = ce->engine->mask; in guc_virtual_context_unpin()
3750 struct intel_guc *guc = ce_to_guc(ce); in guc_virtual_context_unpin()
3752 GEM_BUG_ON(context_enabled(ce)); in guc_virtual_context_unpin()
3753 GEM_BUG_ON(intel_context_is_barrier(ce)); in guc_virtual_context_unpin()
3755 unpin_guc_id(guc, ce); in guc_virtual_context_unpin()
3756 lrc_unpin(ce); in guc_virtual_context_unpin()
3758 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) in guc_virtual_context_unpin()
3762 static void guc_virtual_context_enter(struct intel_context *ce) in guc_virtual_context_enter() argument
3764 intel_engine_mask_t tmp, mask = ce->engine->mask; in guc_virtual_context_enter()
3767 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) in guc_virtual_context_enter()
3770 intel_timeline_enter(ce->timeline); in guc_virtual_context_enter()
3773 static void guc_virtual_context_exit(struct intel_context *ce) in guc_virtual_context_exit() argument
3775 intel_engine_mask_t tmp, mask = ce->engine->mask; in guc_virtual_context_exit()
3778 for_each_engine_masked(engine, ce->engine->gt, mask, tmp) in guc_virtual_context_exit()
3781 intel_timeline_exit(ce->timeline); in guc_virtual_context_exit()
3784 static int guc_virtual_context_alloc(struct intel_context *ce) in guc_virtual_context_alloc() argument
3786 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); in guc_virtual_context_alloc()
3788 return lrc_alloc(ce, engine); in guc_virtual_context_alloc()
3817 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr) in guc_parent_context_pin() argument
3819 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); in guc_parent_context_pin()
3820 struct intel_guc *guc = ce_to_guc(ce); in guc_parent_context_pin()
3823 GEM_BUG_ON(!intel_context_is_parent(ce)); in guc_parent_context_pin()
3824 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); in guc_parent_context_pin()
3826 ret = pin_guc_id(guc, ce); in guc_parent_context_pin()
3830 return __guc_context_pin(ce, engine, vaddr); in guc_parent_context_pin()
3833 static int guc_child_context_pin(struct intel_context *ce, void *vaddr) in guc_child_context_pin() argument
3835 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0); in guc_child_context_pin()
3837 GEM_BUG_ON(!intel_context_is_child(ce)); in guc_child_context_pin()
3838 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); in guc_child_context_pin()
3840 __intel_context_pin(ce->parallel.parent); in guc_child_context_pin()
3841 return __guc_context_pin(ce, engine, vaddr); in guc_child_context_pin()
3844 static void guc_parent_context_unpin(struct intel_context *ce) in guc_parent_context_unpin() argument
3846 struct intel_guc *guc = ce_to_guc(ce); in guc_parent_context_unpin()
3848 GEM_BUG_ON(context_enabled(ce)); in guc_parent_context_unpin()
3849 GEM_BUG_ON(intel_context_is_barrier(ce)); in guc_parent_context_unpin()
3850 GEM_BUG_ON(!intel_context_is_parent(ce)); in guc_parent_context_unpin()
3851 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); in guc_parent_context_unpin()
3853 unpin_guc_id(guc, ce); in guc_parent_context_unpin()
3854 lrc_unpin(ce); in guc_parent_context_unpin()
3857 static void guc_child_context_unpin(struct intel_context *ce) in guc_child_context_unpin() argument
3859 GEM_BUG_ON(context_enabled(ce)); in guc_child_context_unpin()
3860 GEM_BUG_ON(intel_context_is_barrier(ce)); in guc_child_context_unpin()
3861 GEM_BUG_ON(!intel_context_is_child(ce)); in guc_child_context_unpin()
3862 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); in guc_child_context_unpin()
3864 lrc_unpin(ce); in guc_child_context_unpin()
3867 static void guc_child_context_post_unpin(struct intel_context *ce) in guc_child_context_post_unpin() argument
3869 GEM_BUG_ON(!intel_context_is_child(ce)); in guc_child_context_post_unpin()
3870 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent)); in guc_child_context_post_unpin()
3871 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine)); in guc_child_context_post_unpin()
3873 lrc_post_unpin(ce); in guc_child_context_post_unpin()
3874 intel_context_unpin(ce->parallel.parent); in guc_child_context_post_unpin()
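The parent/child pin paths above encode an ordering rule: only the parent acquires a guc_id, and each child pins its parent in guc_child_context_pin() and releases that pin only in post_unpin, after its own lrc_post_unpin(). A toy model of that reference discipline is sketched below with a plain counter; pin_child() and unpin_child() are invented names.

#include <assert.h>
#include <stdio.h>

struct ctx {
	struct ctx *parent;	/* NULL for the parent context itself */
	int pin_count;
};

/* Take the parent pin first, as the child pin path above does. */
static void pin_child(struct ctx *child)
{
	child->parent->pin_count++;
	child->pin_count++;
}

/* The parent pin is only dropped once the child is fully unpinned. */
static void unpin_child(struct ctx *child)
{
	child->pin_count--;
	child->parent->pin_count--;
	assert(child->parent->pin_count >= 0);
}

int main(void)
{
	struct ctx parent = { .parent = NULL, .pin_count = 0 };
	struct ctx child = { .parent = &parent, .pin_count = 0 };

	pin_child(&child);
	printf("parent pins while child pinned: %d\n", parent.pin_count);	/* 1 */
	unpin_child(&child);
	printf("parent pins after child unpinned: %d\n", parent.pin_count);	/* 0 */
	return 0;
}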
3879 struct intel_context *ce = container_of(kref, typeof(*ce), ref); in guc_child_context_destroy() local
3881 __guc_context_destroy(ce); in guc_child_context_destroy()
3957 struct intel_context *parent = NULL, *ce, *err; in guc_create_parallel() local
3970 ce = intel_engine_create_virtual(siblings, num_siblings, in guc_create_parallel()
3972 if (IS_ERR(ce)) { in guc_create_parallel()
3973 err = ERR_CAST(ce); in guc_create_parallel()
3978 parent = ce; in guc_create_parallel()
3981 ce->ops = &virtual_child_context_ops; in guc_create_parallel()
3982 intel_context_bind_parent_child(parent, ce); in guc_create_parallel()
3994 for_each_child(parent, ce) { in guc_create_parallel()
3995 ce->engine->emit_bb_start = in guc_create_parallel()
3997 ce->engine->emit_fini_breadcrumb = in guc_create_parallel()
3999 ce->engine->emit_fini_breadcrumb_dw = 16; in guc_create_parallel()
4071 struct intel_context *ce = request_to_scheduling_context(rq); in guc_bump_inflight_request_prio() local
4081 spin_lock(&ce->guc_state.lock); in guc_bump_inflight_request_prio()
4084 sub_context_inflight_prio(ce, rq->guc_prio); in guc_bump_inflight_request_prio()
4086 add_context_inflight_prio(ce, rq->guc_prio); in guc_bump_inflight_request_prio()
4087 update_context_prio(ce); in guc_bump_inflight_request_prio()
4089 spin_unlock(&ce->guc_state.lock); in guc_bump_inflight_request_prio()
4094 struct intel_context *ce = request_to_scheduling_context(rq); in guc_retire_inflight_request_prio() local
4096 spin_lock(&ce->guc_state.lock); in guc_retire_inflight_request_prio()
4097 guc_prio_fini(rq, ce); in guc_retire_inflight_request_prio()
4098 spin_unlock(&ce->guc_state.lock); in guc_retire_inflight_request_prio()
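guc_bump_inflight_request_prio() and guc_retire_inflight_request_prio() above keep, under guc_state.lock, a per-context count of in-flight requests at each GuC priority level and recompute the context priority from whichever bucket is still populated. The sketch below models that bookkeeping with four buckets and assumes a lower index means higher priority; the bucket count and helper names are assumptions for the illustration, not the driver's definitions.

#include <stdio.h>

#define NUM_PRIO 4		/* assumed number of GuC priority levels, 0 = highest */

static unsigned int prio_count[NUM_PRIO];

/* Effective context priority: the lowest index with a non-zero count. */
static int effective_prio(void)
{
	for (int i = 0; i < NUM_PRIO; i++)
		if (prio_count[i])
			return i;
	return NUM_PRIO - 1;	/* nothing in flight: fall back to lowest */
}

static void add_inflight(int prio) { prio_count[prio]++; }
static void sub_inflight(int prio) { prio_count[prio]--; }

int main(void)
{
	add_inflight(3);			/* a normal-priority request */
	printf("prio %d\n", effective_prio());	/* 3 */
	add_inflight(1);			/* one request bumped to high */
	printf("prio %d\n", effective_prio());	/* 1 */
	sub_inflight(1);			/* it retires */
	printf("prio %d\n", effective_prio());	/* back to 3 */
	return 0;
}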
4183 struct intel_context *ce) in guc_kernel_context_pin() argument
4194 if (context_guc_id_invalid(ce)) { in guc_kernel_context_pin()
4195 ret = pin_guc_id(guc, ce); in guc_kernel_context_pin()
4201 if (!test_bit(CONTEXT_GUC_INIT, &ce->flags)) in guc_kernel_context_pin()
4202 guc_context_init(ce); in guc_kernel_context_pin()
4204 ret = try_context_registration(ce, true); in guc_kernel_context_pin()
4206 unpin_guc_id(guc, ce); in guc_kernel_context_pin()
4235 struct intel_context *ce; in guc_init_submission() local
4237 list_for_each_entry(ce, &engine->pinned_contexts_list, in guc_init_submission()
4239 int ret = guc_kernel_context_pin(guc, ce); in guc_init_submission()
4608 struct intel_context *ce; in g2h_context_lookup() local
4615 ce = __get_context(guc, ctx_id); in g2h_context_lookup()
4616 if (unlikely(!ce)) { in g2h_context_lookup()
4621 if (unlikely(intel_context_is_child(ce))) { in g2h_context_lookup()
4626 return ce; in g2h_context_lookup()
4633 struct intel_context *ce; in intel_guc_deregister_done_process_msg() local
4642 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_deregister_done_process_msg()
4643 if (unlikely(!ce)) in intel_guc_deregister_done_process_msg()
4646 trace_intel_context_deregister_done(ce); in intel_guc_deregister_done_process_msg()
4649 if (unlikely(ce->drop_deregister)) { in intel_guc_deregister_done_process_msg()
4650 ce->drop_deregister = false; in intel_guc_deregister_done_process_msg()
4655 if (context_wait_for_deregister_to_register(ce)) { in intel_guc_deregister_done_process_msg()
4657 &ce->engine->gt->i915->runtime_pm; in intel_guc_deregister_done_process_msg()
4665 register_context(ce, true); in intel_guc_deregister_done_process_msg()
4666 guc_signal_context_fence(ce); in intel_guc_deregister_done_process_msg()
4667 intel_context_put(ce); in intel_guc_deregister_done_process_msg()
4668 } else if (context_destroyed(ce)) { in intel_guc_deregister_done_process_msg()
4671 release_guc_id(guc, ce); in intel_guc_deregister_done_process_msg()
4672 __guc_context_destroy(ce); in intel_guc_deregister_done_process_msg()
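intel_guc_deregister_done_process_msg() above ends in one of two ways: a context that was only waiting for deregister-to-register is registered again and its fences are signalled, while a context marked destroyed has its guc_id released and its state freed. A compact sketch of that two-way decision, with invented helper names, follows.

#include <stdio.h>

enum dereg_reason { REREGISTER, DESTROYED };

/* Models the two exits of the deregister-done G2H handler. */
static void handle_deregister_done(enum dereg_reason why)
{
	switch (why) {
	case REREGISTER:
		/* same guc_id, fresh registration; waiters may now proceed */
		printf("re-register context, signal fences\n");
		break;
	case DESTROYED:
		/* final teardown: id back to the pool, state freed */
		printf("release guc_id, destroy context\n");
		break;
	}
}

int main(void)
{
	handle_deregister_done(REREGISTER);
	handle_deregister_done(DESTROYED);
	return 0;
}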
4684 struct intel_context *ce; in intel_guc_sched_done_process_msg() local
4694 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_sched_done_process_msg()
4695 if (unlikely(!ce)) in intel_guc_sched_done_process_msg()
4698 if (unlikely(context_destroyed(ce) || in intel_guc_sched_done_process_msg()
4699 (!context_pending_enable(ce) && in intel_guc_sched_done_process_msg()
4700 !context_pending_disable(ce)))) { in intel_guc_sched_done_process_msg()
4702 ce->guc_state.sched_state, ctx_id); in intel_guc_sched_done_process_msg()
4706 trace_intel_context_sched_done(ce); in intel_guc_sched_done_process_msg()
4708 if (context_pending_enable(ce)) { in intel_guc_sched_done_process_msg()
4710 if (unlikely(ce->drop_schedule_enable)) { in intel_guc_sched_done_process_msg()
4711 ce->drop_schedule_enable = false; in intel_guc_sched_done_process_msg()
4716 spin_lock_irqsave(&ce->guc_state.lock, flags); in intel_guc_sched_done_process_msg()
4717 clr_context_pending_enable(ce); in intel_guc_sched_done_process_msg()
4718 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in intel_guc_sched_done_process_msg()
4719 } else if (context_pending_disable(ce)) { in intel_guc_sched_done_process_msg()
4723 if (unlikely(ce->drop_schedule_disable)) { in intel_guc_sched_done_process_msg()
4724 ce->drop_schedule_disable = false; in intel_guc_sched_done_process_msg()
4736 intel_context_sched_disable_unpin(ce); in intel_guc_sched_done_process_msg()
4738 spin_lock_irqsave(&ce->guc_state.lock, flags); in intel_guc_sched_done_process_msg()
4739 banned = context_banned(ce); in intel_guc_sched_done_process_msg()
4740 clr_context_banned(ce); in intel_guc_sched_done_process_msg()
4741 clr_context_pending_disable(ce); in intel_guc_sched_done_process_msg()
4742 __guc_signal_context_fence(ce); in intel_guc_sched_done_process_msg()
4743 guc_blocked_fence_complete(ce); in intel_guc_sched_done_process_msg()
4744 spin_unlock_irqrestore(&ce->guc_state.lock, flags); in intel_guc_sched_done_process_msg()
4747 guc_cancel_context_requests(ce); in intel_guc_sched_done_process_msg()
4748 intel_engine_signal_breadcrumbs(ce->engine); in intel_guc_sched_done_process_msg()
4753 intel_context_put(ce); in intel_guc_sched_done_process_msg()
4759 struct intel_context *ce) in capture_error_state() argument
4766 if (intel_engine_is_virtual(ce->engine)) { in capture_error_state()
4768 intel_engine_mask_t tmp, virtual_mask = ce->engine->mask; in capture_error_state()
4771 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) { in capture_error_state()
4772 bool match = intel_guc_capture_is_matching_engine(gt, ce, e); in capture_error_state()
4775 intel_engine_set_hung_context(e, ce); in capture_error_state()
4784 ce->guc_id.id, ce->engine->name); in capture_error_state()
4788 intel_engine_set_hung_context(ce->engine, ce); in capture_error_state()
4789 engine_mask = ce->engine->mask; in capture_error_state()
4790 i915_increase_reset_engine_count(&i915->gpu_error, ce->engine); in capture_error_state()
4797 static void guc_context_replay(struct intel_context *ce) in guc_context_replay() argument
4799 struct i915_sched_engine *sched_engine = ce->engine->sched_engine; in guc_context_replay()
4801 __guc_reset_context(ce, ce->engine->mask); in guc_context_replay()
4806 struct intel_context *ce) in guc_handle_context_reset() argument
4808 trace_intel_context_reset(ce); in guc_handle_context_reset()
4811 ce->guc_id.id, ce->engine->name, in guc_handle_context_reset()
4812 str_yes_no(intel_context_is_exiting(ce)), in guc_handle_context_reset()
4813 str_yes_no(intel_context_is_banned(ce))); in guc_handle_context_reset()
4815 if (likely(intel_context_is_schedulable(ce))) { in guc_handle_context_reset()
4816 capture_error_state(guc, ce); in guc_handle_context_reset()
4817 guc_context_replay(ce); in guc_handle_context_reset()
4820 ce->guc_id.id, ce->engine->name); in guc_handle_context_reset()
4827 struct intel_context *ce; in intel_guc_context_reset_process_msg() local
4845 ce = g2h_context_lookup(guc, ctx_id); in intel_guc_context_reset_process_msg()
4846 if (ce) in intel_guc_context_reset_process_msg()
4847 intel_context_get(ce); in intel_guc_context_reset_process_msg()
4850 if (unlikely(!ce)) in intel_guc_context_reset_process_msg()
4853 guc_handle_context_reset(guc, ce); in intel_guc_context_reset_process_msg()
4854 intel_context_put(ce); in intel_guc_context_reset_process_msg()
4969 struct intel_context *ce; in intel_guc_find_hung_context() local
4979 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_find_hung_context()
4982 if (!kref_get_unless_zero(&ce->ref)) in intel_guc_find_hung_context()
4987 if (!intel_context_is_pinned(ce)) in intel_guc_find_hung_context()
4990 if (intel_engine_is_virtual(ce->engine)) { in intel_guc_find_hung_context()
4991 if (!(ce->engine->mask & engine->mask)) in intel_guc_find_hung_context()
4994 if (ce->engine != engine) in intel_guc_find_hung_context()
4999 spin_lock(&ce->guc_state.lock); in intel_guc_find_hung_context()
5000 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) { in intel_guc_find_hung_context()
5007 spin_unlock(&ce->guc_state.lock); in intel_guc_find_hung_context()
5010 intel_engine_set_hung_context(engine, ce); in intel_guc_find_hung_context()
5013 intel_context_put(ce); in intel_guc_find_hung_context()
5019 intel_context_put(ce); in intel_guc_find_hung_context()
5031 struct intel_context *ce; in intel_guc_dump_active_requests() local
5040 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_dump_active_requests()
5041 if (!kref_get_unless_zero(&ce->ref)) in intel_guc_dump_active_requests()
5046 if (!intel_context_is_pinned(ce)) in intel_guc_dump_active_requests()
5049 if (intel_engine_is_virtual(ce->engine)) { in intel_guc_dump_active_requests()
5050 if (!(ce->engine->mask & engine->mask)) in intel_guc_dump_active_requests()
5053 if (ce->engine != engine) in intel_guc_dump_active_requests()
5057 spin_lock(&ce->guc_state.lock); in intel_guc_dump_active_requests()
5058 intel_engine_dump_active_requests(&ce->guc_state.requests, in intel_guc_dump_active_requests()
5060 spin_unlock(&ce->guc_state.lock); in intel_guc_dump_active_requests()
5063 intel_context_put(ce); in intel_guc_dump_active_requests()
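Both loops above (intel_guc_find_hung_context() and intel_guc_dump_active_requests()) use the same safe-iteration idiom: walk guc->context_lookup with xa_for_each() and only inspect a context once kref_get_unless_zero() succeeds, so contexts already heading for destruction are simply skipped. The standalone sketch below models that idiom with a plain array and a C11 atomic refcount; ctx, get_unless_zero() and put() are names invented for the illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int id;
	atomic_int refcount;	/* 0 means the context is being destroyed */
};

/* Equivalent of kref_get_unless_zero(): take a ref only if one still exists. */
static bool get_unless_zero(struct ctx *c)
{
	int old = atomic_load(&c->refcount);

	while (old) {
		if (atomic_compare_exchange_weak(&c->refcount, &old, old + 1))
			return true;
	}
	return false;
}

static void put(struct ctx *c)
{
	atomic_fetch_sub(&c->refcount, 1);
}

int main(void)
{
	struct ctx table[] = {
		{ .id = 0, .refcount = 1 },
		{ .id = 1, .refcount = 0 },	/* mid-destruction: must be skipped */
		{ .id = 2, .refcount = 2 },
	};

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (!get_unless_zero(&table[i]))
			continue;
		printf("inspect context %d\n", table[i].id);
		put(&table[i]);
	}
	return 0;
}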
5103 struct intel_context *ce) in guc_log_context_priority() argument
5107 drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio); in guc_log_context_priority()
5112 i, ce->guc_state.prio_count[i]); in guc_log_context_priority()
5118 struct intel_context *ce) in guc_log_context() argument
5120 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); in guc_log_context()
5121 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); in guc_log_context()
5123 ce->ring->head, in guc_log_context()
5124 ce->lrc_reg_state[CTX_RING_HEAD]); in guc_log_context()
5126 ce->ring->tail, in guc_log_context()
5127 ce->lrc_reg_state[CTX_RING_TAIL]); in guc_log_context()
5129 atomic_read(&ce->pin_count)); in guc_log_context()
5131 atomic_read(&ce->guc_id.ref)); in guc_log_context()
5133 ce->guc_state.sched_state); in guc_log_context()
5139 struct intel_context *ce; in intel_guc_submission_print_context_info() local
5144 xa_for_each(&guc->context_lookup, index, ce) { in intel_guc_submission_print_context_info()
5145 GEM_BUG_ON(intel_context_is_child(ce)); in intel_guc_submission_print_context_info()
5147 guc_log_context(p, ce); in intel_guc_submission_print_context_info()
5148 guc_log_context_priority(p, ce); in intel_guc_submission_print_context_info()
5150 if (intel_context_is_parent(ce)) { in intel_guc_submission_print_context_info()
5154 ce->parallel.number_children); in intel_guc_submission_print_context_info()
5156 if (ce->parallel.guc.wq_status) { in intel_guc_submission_print_context_info()
5158 READ_ONCE(*ce->parallel.guc.wq_head)); in intel_guc_submission_print_context_info()
5160 READ_ONCE(*ce->parallel.guc.wq_tail)); in intel_guc_submission_print_context_info()
5162 READ_ONCE(*ce->parallel.guc.wq_status)); in intel_guc_submission_print_context_info()
5165 if (ce->engine->emit_bb_start == in intel_guc_submission_print_context_info()
5170 get_children_go_value(ce)); in intel_guc_submission_print_context_info()
5171 for (i = 0; i < ce->parallel.number_children; ++i) in intel_guc_submission_print_context_info()
5173 get_children_join_value(ce, i)); in intel_guc_submission_print_context_info()
5176 for_each_child(ce, child) in intel_guc_submission_print_context_info()
5183 static inline u32 get_children_go_addr(struct intel_context *ce) in get_children_go_addr() argument
5185 GEM_BUG_ON(!intel_context_is_parent(ce)); in get_children_go_addr()
5187 return i915_ggtt_offset(ce->state) + in get_children_go_addr()
5188 __get_parent_scratch_offset(ce) + in get_children_go_addr()
5192 static inline u32 get_children_join_addr(struct intel_context *ce, in get_children_join_addr() argument
5195 GEM_BUG_ON(!intel_context_is_parent(ce)); in get_children_join_addr()
5197 return i915_ggtt_offset(ce->state) + in get_children_join_addr()
5198 __get_parent_scratch_offset(ce) + in get_children_join_addr()
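get_children_go_addr() and get_children_join_addr() above derive GGTT addresses of semaphores kept in the parent's scratch area: one go semaphore written by the parent and one join semaphore per child. The sketch below only illustrates how such offsets can be computed with offsetof() from a cacheline-padded layout; the struct names, field names, cacheline size and MAX_CHILDREN value are assumptions, not the driver's parent_scratch definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CACHELINE_BYTES	64	/* assumption for the sketch */
#define MAX_CHILDREN	3	/* assumption for the sketch */

/* Give each semaphore its own cacheline so pollers do not false-share. */
struct sync_semaphore_model {
	uint32_t semaphore;
	uint8_t unused[CACHELINE_BYTES - sizeof(uint32_t)];
};

struct parent_scratch_model {
	struct sync_semaphore_model go;
	struct sync_semaphore_model join[MAX_CHILDREN];
};

/* base stands in for i915_ggtt_offset(ce->state) plus the scratch offset. */
static uint32_t go_addr(uint32_t base)
{
	return base + offsetof(struct parent_scratch_model, go.semaphore);
}

static uint32_t join_addr(uint32_t base, unsigned int child)
{
	return base + offsetof(struct parent_scratch_model, join) +
	       child * sizeof(struct sync_semaphore_model) +
	       offsetof(struct sync_semaphore_model, semaphore);
}

int main(void)
{
	uint32_t base = 0x10000;

	printf("go    at 0x%x\n", (unsigned int)go_addr(base));
	printf("join0 at 0x%x\n", (unsigned int)join_addr(base, 0));
	printf("join1 at 0x%x\n", (unsigned int)join_addr(base, 1));
	return 0;
}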
5210 struct intel_context *ce = rq->context; in emit_bb_start_parent_no_preempt_mid_batch() local
5214 GEM_BUG_ON(!intel_context_is_parent(ce)); in emit_bb_start_parent_no_preempt_mid_batch()
5216 cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children); in emit_bb_start_parent_no_preempt_mid_batch()
5221 for (i = 0; i < ce->parallel.number_children; ++i) { in emit_bb_start_parent_no_preempt_mid_batch()
5227 *cs++ = get_children_join_addr(ce, i); in emit_bb_start_parent_no_preempt_mid_batch()
5238 get_children_go_addr(ce), in emit_bb_start_parent_no_preempt_mid_batch()
5257 struct intel_context *ce = rq->context; in emit_bb_start_child_no_preempt_mid_batch() local
5258 struct intel_context *parent = intel_context_to_parent(ce); in emit_bb_start_child_no_preempt_mid_batch()
5261 GEM_BUG_ON(!intel_context_is_child(ce)); in emit_bb_start_child_no_preempt_mid_batch()
5271 ce->parallel.child_index), in emit_bb_start_child_no_preempt_mid_batch()
5301 struct intel_context *ce = rq->context; in __emit_fini_breadcrumb_parent_no_preempt_mid_batch() local
5304 GEM_BUG_ON(!intel_context_is_parent(ce)); in __emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5307 for (i = 0; i < ce->parallel.number_children; ++i) { in __emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5313 *cs++ = get_children_join_addr(ce, i); in __emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5324 get_children_go_addr(ce), in __emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5351 struct intel_context *ce = rq->context; in emit_fini_breadcrumb_parent_no_preempt_mid_batch() local
5355 GEM_BUG_ON(!intel_context_is_parent(ce)); in emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5363 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN)); in emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5364 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN; in emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5384 ce->engine->emit_fini_breadcrumb_dw != cs); in emit_fini_breadcrumb_parent_no_preempt_mid_batch()
5395 struct intel_context *ce = rq->context; in __emit_fini_breadcrumb_child_no_preempt_mid_batch() local
5396 struct intel_context *parent = intel_context_to_parent(ce); in __emit_fini_breadcrumb_child_no_preempt_mid_batch()
5398 GEM_BUG_ON(!intel_context_is_child(ce)); in __emit_fini_breadcrumb_child_no_preempt_mid_batch()
5408 ce->parallel.child_index), in __emit_fini_breadcrumb_child_no_preempt_mid_batch()
5427 struct intel_context *ce = rq->context; in emit_fini_breadcrumb_child_no_preempt_mid_batch() local
5431 GEM_BUG_ON(!intel_context_is_child(ce)); in emit_fini_breadcrumb_child_no_preempt_mid_batch()
5439 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN)); in emit_fini_breadcrumb_child_no_preempt_mid_batch()
5440 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN; in emit_fini_breadcrumb_child_no_preempt_mid_batch()
5460 ce->engine->emit_fini_breadcrumb_dw != cs); in emit_fini_breadcrumb_child_no_preempt_mid_batch()
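The emit_bb_start and fini-breadcrumb fragments above implement a two-phase barrier over that scratch area: the parent waits on every child's join semaphore and then writes the go semaphore the children poll, both before the batch and again in the fini breadcrumbs so parent and children leave the batch together. The pthread model below (compile with -pthread) demonstrates only the barrier logic with C11 atomics; it is an analogy for the MI_SEMAPHORE_WAIT / store sequence, not GPU code, and the names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CHILDREN 2

static atomic_uint join_sema[NUM_CHILDREN];	/* child -> parent: "arrived" */
static atomic_uint go_sema;			/* parent -> children: "proceed" */

static void *child_fn(void *arg)
{
	unsigned int idx = (unsigned int)(uintptr_t)arg;

	/* Child signals join, then busy-waits on go (semaphore-wait analogue). */
	atomic_store(&join_sema[idx], 1);
	while (!atomic_load(&go_sema))
		;
	printf("child %u past the barrier\n", idx);
	return NULL;
}

int main(void)
{
	pthread_t child[NUM_CHILDREN];

	for (unsigned int i = 0; i < NUM_CHILDREN; i++)
		pthread_create(&child[i], NULL, child_fn, (void *)(uintptr_t)i);

	/* Parent waits for every join, re-arms it, then writes go. */
	for (unsigned int i = 0; i < NUM_CHILDREN; i++) {
		while (!atomic_load(&join_sema[i]))
			;
		atomic_store(&join_sema[i], 0);
	}
	atomic_store(&go_sema, 1);
	printf("parent past the barrier\n");

	for (unsigned int i = 0; i < NUM_CHILDREN; i++)
		pthread_join(child[i], NULL);
	return 0;
}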