1 /*
2 * Copyright © 2008-2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #ifndef I915_REQUEST_H
26 #define I915_REQUEST_H
27
28 #include <linux/dma-fence.h>
29 #include <linux/hrtimer.h>
30 #include <linux/irq_work.h>
31 #include <linux/llist.h>
32 #include <linux/lockdep.h>
33
34 #include "gem/i915_gem_context_types.h"
35 #include "gt/intel_context_types.h"
36 #include "gt/intel_engine_types.h"
37 #include "gt/intel_timeline_types.h"
38
39 #include "i915_gem.h"
40 #include "i915_scheduler.h"
41 #include "i915_selftest.h"
42 #include "i915_sw_fence.h"
43 #include "i915_vma_resource.h"
44
45 #include <uapi/drm/i915_drm.h>
46
47 struct drm_file;
48 struct drm_i915_gem_object;
49 struct drm_printer;
50 struct i915_deps;
51 struct i915_request;
52
53 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
54 struct i915_capture_list {
55 struct i915_vma_resource *vma_res;
56 struct i915_capture_list *next;
57 };
58
59 void i915_request_free_capture_list(struct i915_capture_list *capture);
60 #else
61 #define i915_request_free_capture_list(_a) do {} while (0)
62 #endif
63
64 #define RQ_TRACE(rq, fmt, ...) do { \
65 const struct i915_request *rq__ = (rq); \
66 ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
67 rq__->fence.context, rq__->fence.seqno, \
68 hwsp_seqno(rq__), ##__VA_ARGS__); \
69 } while (0)
70
71 enum {
72 /*
73 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
74 *
75 * Set by __i915_request_submit() on handing over to HW, and cleared
76 * by __i915_request_unsubmit() if we preempt this request.
77 *
78 * Finally cleared for consistency on retiring the request, when
79 * we know the HW is no longer running this request.
80 *
81 * See i915_request_is_active()
82 */
83 I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
84
85 /*
86 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
87 *
88 * Using the scheduler, when a request is ready for execution it is put
89 * into the priority queue, and removed from that queue when transferred
90 * to the HW runlists. We want to track its membership within the
91 * priority queue so that we can easily check before rescheduling.
92 *
93 * See i915_request_in_priority_queue()
94 */
95 I915_FENCE_FLAG_PQUEUE,
96
97 /*
98 * I915_FENCE_FLAG_HOLD - this request is currently on hold
99 *
100 * This request has been suspended, pending an ongoing investigation.
101 */
102 I915_FENCE_FLAG_HOLD,
103
104 /*
105 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
106 * breadcrumb that marks the end of semaphore waits and start of the
107 * user payload.
108 */
109 I915_FENCE_FLAG_INITIAL_BREADCRUMB,
110
111 /*
112 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
113 *
114 * Internal bookkeeping used by the breadcrumb code to track when
115 * a request is on the various signal_list.
116 */
117 I915_FENCE_FLAG_SIGNAL,
118
119 /*
120 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
121 *
122 * The execution of some requests should not be interrupted. This is
123 * a sensitive operation as it makes the request super important,
124 * blocking other higher priority work. Abuse of this flag will
125 * lead to quality of service issues.
126 */
127 I915_FENCE_FLAG_NOPREEMPT,
128
129 /*
130 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
131 *
132 * A high priority sentinel request may be submitted to clear the
133 * submission queue. As it will be the only request in-flight, upon
134 * execution all other active requests will have been preempted and
135 * unsubmitted. This preemptive pulse is used to re-evaluate the
136 * in-flight requests, particularly in cases where an active context
137 * is banned and those active requests need to be cancelled.
138 */
139 I915_FENCE_FLAG_SENTINEL,
140
141 /*
142 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
143 *
144 * Some requests are more important than others! In particular, a
145 * request that the user is waiting on is typically required for
146 * interactive latency, for which we want to minimise by upclocking
147 * the GPU. Here we track such boost requests on a per-request basis.
148 */
149 I915_FENCE_FLAG_BOOST,
150
151 /*
152 * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
153 * parent-child relationship (parallel submission, multi-lrc) should
154 * trigger a submission to the GuC rather than just moving the context
155 * tail.
156 */
157 I915_FENCE_FLAG_SUBMIT_PARALLEL,
158
159 /*
160 * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
161 * parent-child relationship (parallel submission, multi-lrc) that
162 * hit an error while generating requests in the execbuf IOCTL.
163 * Indicates this request should be skipped as another request in
164 * submission / relationship encoutered an error.
165 */
166 I915_FENCE_FLAG_SKIP_PARALLEL,
167
168 /*
169 * I915_FENCE_FLAG_COMPOSITE - Indicates fence is part of a composite
170 * fence (dma_fence_array) and i915 generated for parallel submission.
171 */
172 I915_FENCE_FLAG_COMPOSITE,
173 };
174
175 /**
176 * Request queue structure.
177 *
178 * The request queue allows us to note sequence numbers that have been emitted
179 * and may be associated with active buffers to be retired.
180 *
181 * By keeping this list, we can avoid having to do questionable sequence
182 * number comparisons on buffer last_read|write_seqno. It also allows an
183 * emission time to be associated with the request for tracking how far ahead
184 * of the GPU the submission is.
185 *
186 * When modifying this structure be very aware that we perform a lockless
187 * RCU lookup of it that may race against reallocation of the struct
188 * from the slab freelist. We intentionally do not zero the structure on
189 * allocation so that the lookup can use the dangling pointers (and is
190 * cogniscent that those pointers may be wrong). Instead, everything that
191 * needs to be initialised must be done so explicitly.
192 *
193 * The requests are reference counted.
194 */
195 struct i915_request {
196 struct dma_fence fence;
197 spinlock_t lock;
198
199 /**
200 * Context and ring buffer related to this request
201 * Contexts are refcounted, so when this request is associated with a
202 * context, we must increment the context's refcount, to guarantee that
203 * it persists while any request is linked to it. Requests themselves
204 * are also refcounted, so the request will only be freed when the last
205 * reference to it is dismissed, and the code in
206 * i915_request_free() will then decrement the refcount on the
207 * context.
208 */
209 struct intel_engine_cs *engine;
210 struct intel_context *context;
211 struct intel_ring *ring;
212 struct intel_timeline __rcu *timeline;
213
214 struct list_head signal_link;
215 struct llist_node signal_node;
216
217 /*
218 * The rcu epoch of when this request was allocated. Used to judiciously
219 * apply backpressure on future allocations to ensure that under
220 * mempressure there is sufficient RCU ticks for us to reclaim our
221 * RCU protected slabs.
222 */
223 unsigned long rcustate;
224
225 /*
226 * We pin the timeline->mutex while constructing the request to
227 * ensure that no caller accidentally drops it during construction.
228 * The timeline->mutex must be held to ensure that only this caller
229 * can use the ring and manipulate the associated timeline during
230 * construction.
231 */
232 struct pin_cookie cookie;
233
234 /*
235 * Fences for the various phases in the request's lifetime.
236 *
237 * The submit fence is used to await upon all of the request's
238 * dependencies. When it is signaled, the request is ready to run.
239 * It is used by the driver to then queue the request for execution.
240 */
241 struct i915_sw_fence submit;
242 union {
243 wait_queue_entry_t submitq;
244 struct i915_sw_dma_fence_cb dmaq;
245 struct i915_request_duration_cb {
246 struct dma_fence_cb cb;
247 ktime_t emitted;
248 } duration;
249 };
250 struct llist_head execute_cb;
251 struct i915_sw_fence semaphore;
252 /**
253 * @submit_work: complete submit fence from an IRQ if needed for
254 * locking hierarchy reasons.
255 */
256 struct irq_work submit_work;
257
258 /*
259 * A list of everyone we wait upon, and everyone who waits upon us.
260 * Even though we will not be submitted to the hardware before the
261 * submit fence is signaled (it waits for all external events as well
262 * as our own requests), the scheduler still needs to know the
263 * dependency tree for the lifetime of the request (from execbuf
264 * to retirement), i.e. bidirectional dependency information for the
265 * request not tied to individual fences.
266 */
267 struct i915_sched_node sched;
268 struct i915_dependency dep;
269 intel_engine_mask_t execution_mask;
270
271 /*
272 * A convenience pointer to the current breadcrumb value stored in
273 * the HW status page (or our timeline's local equivalent). The full
274 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
275 */
276 const u32 *hwsp_seqno;
277
278 /** Position in the ring of the start of the request */
279 u32 head;
280
281 /** Position in the ring of the start of the user packets */
282 u32 infix;
283
284 /**
285 * Position in the ring of the start of the postfix.
286 * This is required to calculate the maximum available ring space
287 * without overwriting the postfix.
288 */
289 u32 postfix;
290
291 /** Position in the ring of the end of the whole request */
292 u32 tail;
293
294 /** Position in the ring of the end of any workarounds after the tail */
295 u32 wa_tail;
296
297 /** Preallocate space in the ring for the emitting the request */
298 u32 reserved_space;
299
300 /** Batch buffer pointer for selftest internal use. */
301 I915_SELFTEST_DECLARE(struct i915_vma *batch);
302
303 struct i915_vma_resource *batch_res;
304
305 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
306 /**
307 * Additional buffers requested by userspace to be captured upon
308 * a GPU hang. The vma/obj on this list are protected by their
309 * active reference - all objects on this list must also be
310 * on the active_list (of their final request).
311 */
312 struct i915_capture_list *capture_list;
313 #endif
314
315 /** Time at which this request was emitted, in jiffies. */
316 unsigned long emitted_jiffies;
317
318 /** timeline->request entry for this request */
319 struct list_head link;
320
321 /** Watchdog support fields. */
322 struct i915_request_watchdog {
323 struct llist_node link;
324 struct hrtimer timer;
325 } watchdog;
326
327 /**
328 * @guc_fence_link: Requests may need to be stalled when using GuC
329 * submission waiting for certain GuC operations to complete. If that is
330 * the case, stalled requests are added to a per context list of stalled
331 * requests. The below list_head is the link in that list. Protected by
332 * ce->guc_state.lock.
333 */
334 struct list_head guc_fence_link;
335
336 /**
337 * @guc_prio: Priority level while the request is in flight. Differs
338 * from i915 scheduler priority. See comment above
339 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
340 * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
341 * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
342 * if the priority has not been initialized yet or if no more updates
343 * are possible because the request has completed.
344 */
345 #define GUC_PRIO_INIT 0xff
346 #define GUC_PRIO_FINI 0xfe
347 u8 guc_prio;
348
349 I915_SELFTEST_DECLARE(struct {
350 struct list_head link;
351 unsigned long delay;
352 } mock;)
353 };
354
355 #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
356
357 extern const struct dma_fence_ops i915_fence_ops;
358
dma_fence_is_i915(const struct dma_fence * fence)359 static inline bool dma_fence_is_i915(const struct dma_fence *fence)
360 {
361 return fence->ops == &i915_fence_ops;
362 }
363
364 struct kmem_cache *i915_request_slab_cache(void);
365
366 struct i915_request * __must_check
367 __i915_request_create(struct intel_context *ce, gfp_t gfp);
368 struct i915_request * __must_check
369 i915_request_create(struct intel_context *ce);
370
371 void __i915_request_skip(struct i915_request *rq);
372 bool i915_request_set_error_once(struct i915_request *rq, int error);
373 struct i915_request *i915_request_mark_eio(struct i915_request *rq);
374
375 struct i915_request *__i915_request_commit(struct i915_request *request);
376 void __i915_request_queue(struct i915_request *rq,
377 const struct i915_sched_attr *attr);
378 void __i915_request_queue_bh(struct i915_request *rq);
379
380 bool i915_request_retire(struct i915_request *rq);
381 void i915_request_retire_upto(struct i915_request *rq);
382
383 static inline struct i915_request *
to_request(struct dma_fence * fence)384 to_request(struct dma_fence *fence)
385 {
386 /* We assume that NULL fence/request are interoperable */
387 BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
388 GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
389 return container_of(fence, struct i915_request, fence);
390 }
391
392 static inline struct i915_request *
i915_request_get(struct i915_request * rq)393 i915_request_get(struct i915_request *rq)
394 {
395 return to_request(dma_fence_get(&rq->fence));
396 }
397
398 static inline struct i915_request *
i915_request_get_rcu(struct i915_request * rq)399 i915_request_get_rcu(struct i915_request *rq)
400 {
401 return to_request(dma_fence_get_rcu(&rq->fence));
402 }
403
404 static inline void
i915_request_put(struct i915_request * rq)405 i915_request_put(struct i915_request *rq)
406 {
407 dma_fence_put(&rq->fence);
408 }
409
410 int i915_request_await_object(struct i915_request *to,
411 struct drm_i915_gem_object *obj,
412 bool write);
413 int i915_request_await_dma_fence(struct i915_request *rq,
414 struct dma_fence *fence);
415 int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
416 int i915_request_await_execution(struct i915_request *rq,
417 struct dma_fence *fence);
418
419 void i915_request_add(struct i915_request *rq);
420
421 bool __i915_request_submit(struct i915_request *request);
422 void i915_request_submit(struct i915_request *request);
423
424 void __i915_request_unsubmit(struct i915_request *request);
425 void i915_request_unsubmit(struct i915_request *request);
426
427 void i915_request_cancel(struct i915_request *rq, int error);
428
429 long i915_request_wait_timeout(struct i915_request *rq,
430 unsigned int flags,
431 long timeout)
432 __attribute__((nonnull(1)));
433
434 long i915_request_wait(struct i915_request *rq,
435 unsigned int flags,
436 long timeout)
437 __attribute__((nonnull(1)));
438 #define I915_WAIT_INTERRUPTIBLE BIT(0)
439 #define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */
440 #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
441
442 void i915_request_show(struct drm_printer *m,
443 const struct i915_request *rq,
444 const char *prefix,
445 int indent);
446
i915_request_signaled(const struct i915_request * rq)447 static inline bool i915_request_signaled(const struct i915_request *rq)
448 {
449 /* The request may live longer than its HWSP, so check flags first! */
450 return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
451 }
452
i915_request_is_active(const struct i915_request * rq)453 static inline bool i915_request_is_active(const struct i915_request *rq)
454 {
455 return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
456 }
457
i915_request_in_priority_queue(const struct i915_request * rq)458 static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
459 {
460 return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
461 }
462
463 static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request * rq)464 i915_request_has_initial_breadcrumb(const struct i915_request *rq)
465 {
466 return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
467 }
468
469 /**
470 * Returns true if seq1 is later than seq2.
471 */
i915_seqno_passed(u32 seq1,u32 seq2)472 static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
473 {
474 return (s32)(seq1 - seq2) >= 0;
475 }
476
__hwsp_seqno(const struct i915_request * rq)477 static inline u32 __hwsp_seqno(const struct i915_request *rq)
478 {
479 const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
480
481 return READ_ONCE(*hwsp);
482 }
483
484 /**
485 * hwsp_seqno - the current breadcrumb value in the HW status page
486 * @rq: the request, to chase the relevant HW status page
487 *
488 * The emphasis in naming here is that hwsp_seqno() is not a property of the
489 * request, but an indication of the current HW state (associated with this
490 * request). Its value will change as the GPU executes more requests.
491 *
492 * Returns the current breadcrumb value in the associated HW status page (or
493 * the local timeline's equivalent) for this request. The request itself
494 * has the associated breadcrumb value of rq->fence.seqno, when the HW
495 * status page has that breadcrumb or later, this request is complete.
496 */
hwsp_seqno(const struct i915_request * rq)497 static inline u32 hwsp_seqno(const struct i915_request *rq)
498 {
499 u32 seqno;
500
501 rcu_read_lock(); /* the HWSP may be freed at runtime */
502 seqno = __hwsp_seqno(rq);
503 rcu_read_unlock();
504
505 return seqno;
506 }
507
__i915_request_has_started(const struct i915_request * rq)508 static inline bool __i915_request_has_started(const struct i915_request *rq)
509 {
510 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
511 }
512
513 /**
514 * i915_request_started - check if the request has begun being executed
515 * @rq: the request
516 *
517 * If the timeline is not using initial breadcrumbs, a request is
518 * considered started if the previous request on its timeline (i.e.
519 * context) has been signaled.
520 *
521 * If the timeline is using semaphores, it will also be emitting an
522 * "initial breadcrumb" after the semaphores are complete and just before
523 * it began executing the user payload. A request can therefore be active
524 * on the HW and not yet started as it is still busywaiting on its
525 * dependencies (via HW semaphores).
526 *
527 * If the request has started, its dependencies will have been signaled
528 * (either by fences or by semaphores) and it will have begun processing
529 * the user payload.
530 *
531 * However, even if a request has started, it may have been preempted and
532 * so no longer active, or it may have already completed.
533 *
534 * See also i915_request_is_active().
535 *
536 * Returns true if the request has begun executing the user payload, or
537 * has completed:
538 */
i915_request_started(const struct i915_request * rq)539 static inline bool i915_request_started(const struct i915_request *rq)
540 {
541 bool result;
542
543 if (i915_request_signaled(rq))
544 return true;
545
546 result = true;
547 rcu_read_lock(); /* the HWSP may be freed at runtime */
548 if (likely(!i915_request_signaled(rq)))
549 /* Remember: started but may have since been preempted! */
550 result = __i915_request_has_started(rq);
551 rcu_read_unlock();
552
553 return result;
554 }
555
556 /**
557 * i915_request_is_running - check if the request may actually be executing
558 * @rq: the request
559 *
560 * Returns true if the request is currently submitted to hardware, has passed
561 * its start point (i.e. the context is setup and not busywaiting). Note that
562 * it may no longer be running by the time the function returns!
563 */
i915_request_is_running(const struct i915_request * rq)564 static inline bool i915_request_is_running(const struct i915_request *rq)
565 {
566 bool result;
567
568 if (!i915_request_is_active(rq))
569 return false;
570
571 rcu_read_lock();
572 result = __i915_request_has_started(rq) && i915_request_is_active(rq);
573 rcu_read_unlock();
574
575 return result;
576 }
577
578 /**
579 * i915_request_is_ready - check if the request is ready for execution
580 * @rq: the request
581 *
582 * Upon construction, the request is instructed to wait upon various
583 * signals before it is ready to be executed by the HW. That is, we do
584 * not want to start execution and read data before it is written. In practice,
585 * this is controlled with a mixture of interrupts and semaphores. Once
586 * the submit fence is completed, the backend scheduler will place the
587 * request into its queue and from there submit it for execution. So we
588 * can detect when a request is eligible for execution (and is under control
589 * of the scheduler) by querying where it is in any of the scheduler's lists.
590 *
591 * Returns true if the request is ready for execution (it may be inflight),
592 * false otherwise.
593 */
i915_request_is_ready(const struct i915_request * rq)594 static inline bool i915_request_is_ready(const struct i915_request *rq)
595 {
596 return !list_empty(&rq->sched.link);
597 }
598
__i915_request_is_complete(const struct i915_request * rq)599 static inline bool __i915_request_is_complete(const struct i915_request *rq)
600 {
601 return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
602 }
603
i915_request_completed(const struct i915_request * rq)604 static inline bool i915_request_completed(const struct i915_request *rq)
605 {
606 bool result;
607
608 if (i915_request_signaled(rq))
609 return true;
610
611 result = true;
612 rcu_read_lock(); /* the HWSP may be freed at runtime */
613 if (likely(!i915_request_signaled(rq)))
614 result = __i915_request_is_complete(rq);
615 rcu_read_unlock();
616
617 return result;
618 }
619
i915_request_mark_complete(struct i915_request * rq)620 static inline void i915_request_mark_complete(struct i915_request *rq)
621 {
622 WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
623 (u32 *)&rq->fence.seqno);
624 }
625
i915_request_has_waitboost(const struct i915_request * rq)626 static inline bool i915_request_has_waitboost(const struct i915_request *rq)
627 {
628 return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
629 }
630
i915_request_has_nopreempt(const struct i915_request * rq)631 static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
632 {
633 /* Preemption should only be disabled very rarely */
634 return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
635 }
636
i915_request_has_sentinel(const struct i915_request * rq)637 static inline bool i915_request_has_sentinel(const struct i915_request *rq)
638 {
639 return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
640 }
641
i915_request_on_hold(const struct i915_request * rq)642 static inline bool i915_request_on_hold(const struct i915_request *rq)
643 {
644 return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
645 }
646
i915_request_set_hold(struct i915_request * rq)647 static inline void i915_request_set_hold(struct i915_request *rq)
648 {
649 set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
650 }
651
i915_request_clear_hold(struct i915_request * rq)652 static inline void i915_request_clear_hold(struct i915_request *rq)
653 {
654 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
655 }
656
657 static inline struct intel_timeline *
i915_request_timeline(const struct i915_request * rq)658 i915_request_timeline(const struct i915_request *rq)
659 {
660 /* Valid only while the request is being constructed (or retired). */
661 return rcu_dereference_protected(rq->timeline,
662 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
663 test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
664 }
665
666 static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request * rq)667 i915_request_gem_context(const struct i915_request *rq)
668 {
669 /* Valid only while the request is being constructed (or retired). */
670 return rcu_dereference_protected(rq->context->gem_context, true);
671 }
672
673 static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request * rq)674 i915_request_active_timeline(const struct i915_request *rq)
675 {
676 /*
677 * When in use during submission, we are protected by a guarantee that
678 * the context/timeline is pinned and must remain pinned until after
679 * this submission.
680 */
681 return rcu_dereference_protected(rq->timeline,
682 lockdep_is_held(&rq->engine->sched_engine->lock));
683 }
684
685 static inline u32
i915_request_active_seqno(const struct i915_request * rq)686 i915_request_active_seqno(const struct i915_request *rq)
687 {
688 u32 hwsp_phys_base =
689 page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
690 u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);
691
692 /*
693 * Because of wraparound, we cannot simply take tl->hwsp_offset,
694 * but instead use the fact that the relative for vaddr is the
695 * offset as for hwsp_offset. Take the top bits from tl->hwsp_offset
696 * and combine them with the relative offset in rq->hwsp_seqno.
697 *
698 * As rw->hwsp_seqno is rewritten when signaled, this only works
699 * when the request isn't signaled yet, but at that point you
700 * no longer need the offset.
701 */
702
703 return hwsp_phys_base + hwsp_relative_offset;
704 }
705
706 bool
707 i915_request_active_engine(struct i915_request *rq,
708 struct intel_engine_cs **active);
709
710 void i915_request_notify_execute_cb_imm(struct i915_request *rq);
711
712 enum i915_request_state {
713 I915_REQUEST_UNKNOWN = 0,
714 I915_REQUEST_COMPLETE,
715 I915_REQUEST_PENDING,
716 I915_REQUEST_QUEUED,
717 I915_REQUEST_ACTIVE,
718 };
719
720 enum i915_request_state i915_test_request_state(struct i915_request *rq);
721
722 void i915_request_module_exit(void);
723 int i915_request_module_init(void);
724
725 #endif /* I915_REQUEST_H */
726