/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order that they were
 * pushed. A typical submission flow is sketched below.
 */
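
/*
 * Illustrative (commented-out) sketch of a driver-side submission flow using
 * the entry points exported from this file. drm_sched_entity_push_job(),
 * struct my_job and my_submit() are assumptions and not defined here; treat
 * the exact signatures outside this file as such.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *		// driver-specific payload ...
 *	};
 *
 *	int my_submit(struct drm_sched_entity *entity, struct my_job *job,
 *		      struct dma_fence *in_fence)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&job->base, entity, NULL);
 *		if (ret)
 *			return ret;
 *
 *		// optional: wait for in_fence before the job runs;
 *		// the reference is consumed by the call below
 *		ret = drm_sched_job_add_dependency(&job->base,
 *						   dma_fence_get(in_fence));
 *		if (ret)
 *			goto err_cleanup;
 *
 *		drm_sched_job_arm(&job->base);
 *		drm_sched_entity_push_job(&job->base);
 *		return 0;
 *
 *	err_cleanup:
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 */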

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
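
/*
 * Illustrative (commented-out) usage sketch: a driver that needs to pause
 * timeout handling around a slow operation can pair the two helpers above.
 * my_driver_do_slow_thing() is an assumed driver-side function.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_driver_do_slow_thing(adev);		// e.g. firmware reconfiguration
 *	drm_sched_resume_timeout(sched, remaining);
 */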

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job(). It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop() documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);

void drm_sched_reset_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier ones and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * remove job from pending_list.
			 * Locking here is for concurrent resume timeout
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for job's HW fence callback to finish using s_job
			 * before releasing it.
			 *
			 * Job is still alive so fence refcount at least 1
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the timer in flight as we rearm it in drm_sched_start(). This
	 * prevents the pending timeout work in progress from firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
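
/*
 * Illustrative (commented-out) sketch of the recovery sequence the helpers
 * above are designed for. The hardware reset step is driver specific and
 * my_reset_hw()/my_gpu_recover() are only assumed placeholders.
 *
 *	void my_gpu_recover(struct drm_gpu_scheduler *sched,
 *			    struct drm_sched_job *bad)
 *	{
 *		drm_sched_stop(sched, bad);		// park thread, detach HW callbacks
 *		drm_sched_increase_karma(bad);		// optionally blame the entity
 *		my_reset_hw(sched);			// driver-specific reset
 *		drm_sched_resubmit_jobs(sched);		// replay the pending jobs
 *		drm_sched_start(sched, true);		// rearm callbacks, unpark thread
 *	}
 */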

/**
 * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
 *
 * @sched: scheduler instance
 * @max: number of jobs to relaunch
 *
 */
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;
	int i = 0;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (i >= max)
			break;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);
		i++;

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {

			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO as
 * a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
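
/*
 * Illustrative (commented-out) error-unwind pattern for the function above,
 * continuing the struct my_job sketch from the overview: once
 * drm_sched_job_init() succeeded, an aborted submission must still call
 * drm_sched_job_cleanup(), even if drm_sched_job_arm() was never reached.
 * my_validate() is an assumed driver-side step.
 *
 *	ret = drm_sched_job_init(&job->base, entity, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_validate(job);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);	// undo job_init
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);			// point of no return
 */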

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(obj->resv);

	dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write),
				fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
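
/*
 * Illustrative (commented-out) usage sketch for the helper above: with all
 * BO reservations held, pull the implicit fences of every GEM object touched
 * by the job into its dependency array before publishing the job's own
 * fences. The bos[] array and bo_is_written[] flags are assumed driver state.
 *
 *	for (i = 0; i < num_bos; ++i) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      bos[i],
 *							      bo_is_written[i]);
 *		if (ret)
 *			goto err_unlock;
 *	}
 */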


/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);

}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
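
/*
 * Illustrative (commented-out) usage sketch: load-balancing drivers typically
 * call the helper above when (re)selecting a scheduler for an entity, picking
 * the least loaded one out of the list the entity was created with. The
 * sched_list/num_sched_list variables are assumptions.
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(sched_list, num_sched_list);
 *	if (!sched)
 *		return -ENODEV;		// no scheduler is ready
 */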

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					  r);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
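
/*
 * Illustrative (commented-out) init sketch for the function above: a driver
 * provides the backend callbacks referenced throughout this file and creates
 * one scheduler per hardware run queue. my_sched_ops, my_run_job,
 * my_timedout_job, my_free_job and the numeric limits are assumptions.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(10000),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score: use the internal one
 *			     ring->name, dev);
 */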

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * it will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);

	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 * @type: type for increase/reset karma
 *
 */
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from the KERNEL RQ,
	 * because a GPU hang can also corrupt kernel jobs (like VM updating
	 * jobs), but kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		if (type == 0)
			atomic_set(&bad->karma, 0);
		else if (type == 1)
			atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, type);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);