/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Initialize a context entity, used by the scheduler
 * when submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
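 *
 * A minimal, hedged initialization sketch, assuming a single scheduler
 * instance &adev->sched (hypothetical driver state) and no guilty tracking:
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &adev->sched };
 *	int r;
 *
 *	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				  sched_list, ARRAY_SIZE(sched_list), NULL);
 *	if (r)
 *		return r;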
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
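	/*
	 * The scheduler list is only needed when there is an actual choice of
	 * schedulers; with zero or one entry it is dropped so that
	 * drm_sched_entity_select_rq() can take its early-exit path.
	 */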
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
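	/*
	 * Allocate two fence contexts: one for the scheduled fences and a
	 * second one (fence_context + 1) for the finished fences.
	 */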
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *              existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
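 *
 * A hedged usage sketch, assuming the driver serializes against job
 * submission with a hypothetical per-context lock ctx->lock and a new
 * single-element list new_sched_list:
 *
 *	mutex_lock(&ctx->lock);
 *	drm_sched_entity_modify_sched(&entity, new_sched_list, 1);
 *	mutex_unlock(&ctx->lock);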
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
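 *
 * A hedged usage sketch, e.g. refusing further submissions on a hypothetical
 * driver context once the entity has seen a fatal error:
 *
 *	r = drm_sched_entity_error(&ctx->entity);
 *	if (r)
 *		return r;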
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->rq_lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
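	/*
	 * Chain the teardown of each remaining job behind the previous job's
	 * finished fence (starting from the last scheduled one), so the
	 * queued jobs are cleaned up in submission order once their
	 * dependencies have signaled.
	 */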
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
						    drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait for the queue to become empty, in jiffies
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error when
 * the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable enqueueing of any more IBs right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
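 *
 * A minimal teardown sketch, equivalent to what drm_sched_entity_destroy()
 * does, assuming the default MAX_WAIT_SCHED_ENTITY_Q_EMPTY timeout is
 * acceptable:
 *
 *	drm_sched_entity_flush(&entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 *	drm_sched_entity_fini(&entity);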
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup_if_can_queue(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

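	/*
	 * Resolve the job's dependencies: if a wait is still required,
	 * drm_sched_entity_add_dependency_cb() has installed a wake-up
	 * callback and we bail out; the scheduler will retry once the
	 * dependency fence signals.
	 */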
	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* Skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next)
			drm_sched_rq_update_fifo(entity, next->submit_ts);
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->rq_lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
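 *
 * A hedged submission sketch, assuming job is a &struct drm_sched_job pointer
 * prepared earlier with drm_sched_job_init() and that a hypothetical
 * per-context ctx->lock is the common lock mentioned above:
 *
 *	mutex_lock(&ctx->lock);
 *	drm_sched_job_arm(job);
 *	drm_sched_entity_push_job(job);
 *	mutex_unlock(&ctx->lock);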
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, submit_ts);

		drm_sched_wakeup_if_can_queue(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);