xref: /openbsd-src/sys/dev/pci/drm/scheduler/sched_main.c (revision 4e1ee0786f11cc571bd0be17d38e46f635c719fc)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
29  * The software queues have a priority among them. The scheduler selects the entities
30  * from the run queue using a FIFO. The scheduler provides dependency handling
31  * features among jobs. The driver is supposed to provide callback functions for
32  * backend operations to the scheduler like submitting a job to hardware run queue,
33  * returning the dependencies of a job etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  */
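/*
 * Illustrative sketch only (not part of the upstream comment): a driver
 * typically wires the pieces described above together roughly as follows,
 * assuming the entity helpers from sched_entity.c and driver-provided
 * placeholders my_ops (a struct drm_sched_backend_ops), my_job (a struct
 * embedding a drm_sched_job as "base") and my_owner; error handling omitted:
 *
 *	struct drm_gpu_scheduler sched;
 *	struct drm_sched_entity entity;
 *	struct drm_gpu_scheduler *sched_list[] = { &sched };
 *
 *	drm_sched_init(&sched, &my_ops, 32, 2, msecs_to_jiffies(10000), "ring0");
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, ARRAY_SIZE(sched_list), NULL);
 *
 *	drm_sched_job_init(&my_job->base, &entity, my_owner);
 *	drm_sched_entity_push_job(&my_job->base, &entity);
 */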
46 
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <linux/completion.h>
51 #ifdef __linux__
52 #include <uapi/linux/sched/types.h>
53 #endif
54 
55 #include <drm/drm_print.h>
56 #include <drm/gpu_scheduler.h>
57 #include <drm/spsc_queue.h>
58 
59 #define CREATE_TRACE_POINTS
60 #include "gpu_scheduler_trace.h"
61 
62 #define to_drm_sched_job(sched_job)		\
63 		container_of((sched_job), struct drm_sched_job, queue_node)
64 
65 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
66 
67 /**
68  * drm_sched_rq_init - initialize a given run queue struct
69  * @sched: scheduler instance
70  * @rq: scheduler run queue
71  *
72  * Initializes a scheduler runqueue.
73  */
74 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
75 			      struct drm_sched_rq *rq)
76 {
77 	mtx_init(&rq->lock, IPL_NONE);
78 	INIT_LIST_HEAD(&rq->entities);
79 	rq->current_entity = NULL;
80 	rq->sched = sched;
81 }
82 
83 /**
84  * drm_sched_rq_add_entity - add an entity
85  *
86  * @rq: scheduler run queue
87  * @entity: scheduler entity
88  *
89  * Adds a scheduler entity to the run queue.
90  */
91 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
92 			     struct drm_sched_entity *entity)
93 {
94 	if (!list_empty(&entity->list))
95 		return;
96 	spin_lock(&rq->lock);
97 	atomic_inc(&rq->sched->score);
98 	list_add_tail(&entity->list, &rq->entities);
99 	spin_unlock(&rq->lock);
100 }
101 
102 /**
103  * drm_sched_rq_remove_entity - remove an entity
104  *
105  * @rq: scheduler run queue
106  * @entity: scheduler entity
107  *
108  * Removes a scheduler entity from the run queue.
109  */
110 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
111 				struct drm_sched_entity *entity)
112 {
113 	if (list_empty(&entity->list))
114 		return;
115 	spin_lock(&rq->lock);
116 	atomic_dec(&rq->sched->score);
117 	list_del_init(&entity->list);
118 	if (rq->current_entity == entity)
119 		rq->current_entity = NULL;
120 	spin_unlock(&rq->lock);
121 }
122 
123 /**
124  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
125  *
126  * @rq: scheduler run queue to check.
127  *
128  * Try to find a ready entity; returns NULL if none is found.
129  */
130 static struct drm_sched_entity *
131 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
132 {
133 	struct drm_sched_entity *entity;
134 
135 	spin_lock(&rq->lock);
136 
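	/*
	 * Round robin: resume the scan after the entity that was selected
	 * last, and fall back to a full pass over the list if nothing after
	 * it is ready.
	 */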
137 	entity = rq->current_entity;
138 	if (entity) {
139 		list_for_each_entry_continue(entity, &rq->entities, list) {
140 			if (drm_sched_entity_is_ready(entity)) {
141 				rq->current_entity = entity;
142 				reinit_completion(&entity->entity_idle);
143 				spin_unlock(&rq->lock);
144 				return entity;
145 			}
146 		}
147 	}
148 
149 	list_for_each_entry(entity, &rq->entities, list) {
150 
151 		if (drm_sched_entity_is_ready(entity)) {
152 			rq->current_entity = entity;
153 			reinit_completion(&entity->entity_idle);
154 			spin_unlock(&rq->lock);
155 			return entity;
156 		}
157 
158 		if (entity == rq->current_entity)
159 			break;
160 	}
161 
162 	spin_unlock(&rq->lock);
163 
164 	return NULL;
165 }
166 
167 /**
168  * drm_sched_dependency_optimized - test if the dependency can be optimized
169  *
170  * @fence: the dependency fence
171  * @entity: the entity which depends on the above fence
172  *
173  * Returns true if the dependency can be optimized and false otherwise
174  */
175 bool drm_sched_dependency_optimized(struct dma_fence* fence,
176 				    struct drm_sched_entity *entity)
177 {
178 	struct drm_gpu_scheduler *sched = entity->rq->sched;
179 	struct drm_sched_fence *s_fence;
180 
181 	if (!fence || dma_fence_is_signaled(fence))
182 		return false;
183 	if (fence->context == entity->fence_context)
184 		return true;
185 	s_fence = to_drm_sched_fence(fence);
186 	if (s_fence && s_fence->sched == sched)
187 		return true;
188 
189 	return false;
190 }
191 EXPORT_SYMBOL(drm_sched_dependency_optimized);
192 
193 /**
194  * drm_sched_start_timeout - start timeout for reset worker
195  *
196  * @sched: scheduler instance to start the worker for
197  *
198  * Start the timeout for the given scheduler.
199  */
200 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
201 {
202 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
203 	    !list_empty(&sched->ring_mirror_list))
204 		schedule_delayed_work(&sched->work_tdr, sched->timeout);
205 }
206 
207 /**
208  * drm_sched_fault - immediately start timeout handler
209  *
210  * @sched: scheduler where the timeout handling should be started.
211  *
212  * Start timeout handling immediately when the driver detects a hardware fault.
213  */
214 void drm_sched_fault(struct drm_gpu_scheduler *sched)
215 {
216 	mod_delayed_work(system_wq, &sched->work_tdr, 0);
217 }
218 EXPORT_SYMBOL(drm_sched_fault);
219 
220 /**
221  * drm_sched_suspend_timeout - Suspend scheduler job timeout
222  *
223  * @sched: scheduler instance for which to suspend the timeout
224  *
225  * Suspend the delayed work timeout for the scheduler. This is done by
226  * modifying the delayed work timeout to an arbitrary large value,
227  * MAX_SCHEDULE_TIMEOUT in this case.
228  *
229  * Returns the timeout remaining
230  *
231  */
232 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
233 {
234 	unsigned long sched_timeout, now = jiffies;
235 
236 #ifdef __linux__
237 	sched_timeout = sched->work_tdr.timer.expires;
238 #else
239 	sched_timeout = sched->work_tdr.to.to_time;
240 #endif
241 
242 	/*
243 	 * Modify the timeout to an arbitrarily large value. This also prevents
244 	 * the timeout from being restarted when new submissions arrive
245 	 */
246 	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
247 			&& time_after(sched_timeout, now))
248 		return sched_timeout - now;
249 	else
250 		return sched->timeout;
251 }
252 EXPORT_SYMBOL(drm_sched_suspend_timeout);
253 
254 /**
255  * drm_sched_resume_timeout - Resume scheduler job timeout
256  *
257  * @sched: scheduler instance for which to resume the timeout
258  * @remaining: remaining timeout
259  *
260  * Resume the delayed work timeout for the scheduler.
261  */
262 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
263 		unsigned long remaining)
264 {
265 	spin_lock(&sched->job_list_lock);
266 
267 	if (list_empty(&sched->ring_mirror_list))
268 		cancel_delayed_work(&sched->work_tdr);
269 	else
270 		mod_delayed_work(system_wq, &sched->work_tdr, remaining);
271 
272 	spin_unlock(&sched->job_list_lock);
273 }
274 EXPORT_SYMBOL(drm_sched_resume_timeout);
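/*
 * Illustrative sketch only: a driver that needs to pause job-timeout handling
 * around its own reset or suspend path might pair the two helpers above like
 * this (my_do_reset is a hypothetical driver function):
 *
 *	unsigned long remaining = drm_sched_suspend_timeout(sched);
 *
 *	my_do_reset();
 *
 *	drm_sched_resume_timeout(sched, remaining);
 */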
275 
276 static void drm_sched_job_begin(struct drm_sched_job *s_job)
277 {
278 	struct drm_gpu_scheduler *sched = s_job->sched;
279 
280 	spin_lock(&sched->job_list_lock);
281 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
282 	drm_sched_start_timeout(sched);
283 	spin_unlock(&sched->job_list_lock);
284 }
285 
286 static void drm_sched_job_timedout(struct work_struct *work)
287 {
288 	struct drm_gpu_scheduler *sched;
289 	struct drm_sched_job *job;
290 
291 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
292 
293 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
294 	spin_lock(&sched->job_list_lock);
295 	job = list_first_entry_or_null(&sched->ring_mirror_list,
296 				       struct drm_sched_job, node);
297 
298 	if (job) {
299 		/*
300 		 * Remove the bad job so it cannot be freed by a concurrent
301 		 * drm_sched_get_cleanup_job. It will be reinserted after sched->thread
302 		 * is parked, at which point it is safe.
303 		 */
304 		list_del_init(&job->node);
305 		spin_unlock(&sched->job_list_lock);
306 
307 		job->sched->ops->timedout_job(job);
308 
309 		/*
310 		 * The guilty job did complete and hence needs to be manually
311 		 * removed. See the drm_sched_stop() documentation.
312 		 */
313 		if (sched->free_guilty) {
314 			job->sched->ops->free_job(job);
315 			sched->free_guilty = false;
316 		}
317 	} else {
318 		spin_unlock(&sched->job_list_lock);
319 	}
320 
321 	spin_lock(&sched->job_list_lock);
322 	drm_sched_start_timeout(sched);
323 	spin_unlock(&sched->job_list_lock);
324 }
325 
326 /**
327  * drm_sched_increase_karma - Update sched_entity guilty flag
328  *
329  * @bad: The job guilty of the time out
330  *
331  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
332  * limit of the scheduler then the respective sched entity is marked guilty and
333  * jobs from it will not be scheduled further.
334  */
335 void drm_sched_increase_karma(struct drm_sched_job *bad)
336 {
337 	int i;
338 	struct drm_sched_entity *tmp;
339 	struct drm_sched_entity *entity;
340 	struct drm_gpu_scheduler *sched = bad->sched;
341 
342 	/* don't increase @bad's karma if it's from the KERNEL RQ, because
343 	 * sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
344 	 * to be corrupted, but kernel jobs are always considered good.
345 	 */
346 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
347 		atomic_inc(&bad->karma);
348 		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
349 		     i++) {
350 			struct drm_sched_rq *rq = &sched->sched_rq[i];
351 
352 			spin_lock(&rq->lock);
353 			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
354 				if (bad->s_fence->scheduled.context ==
355 				    entity->fence_context) {
356 					if (atomic_read(&bad->karma) >
357 					    bad->sched->hang_limit)
358 						if (entity->guilty)
359 							atomic_set(entity->guilty, 1);
360 					break;
361 				}
362 			}
363 			spin_unlock(&rq->lock);
364 			if (&entity->list != &rq->entities)
365 				break;
366 		}
367 	}
368 }
369 EXPORT_SYMBOL(drm_sched_increase_karma);
370 
371 /**
372  * drm_sched_stop - stop the scheduler
373  *
374  * @sched: scheduler instance
375  * @bad: job which caused the time out
376  *
377  * Stop the scheduler, and also remove and free all completed jobs.
378  * Note: the bad job will not be freed as it might be used later, so it is
379  * the caller's responsibility to release it manually if it is no longer
380  * part of the mirror list.
381  *
382  */
383 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
384 {
385 	struct drm_sched_job *s_job, *tmp;
386 
387 	kthread_park(sched->thread);
388 
389 	/*
390 	 * Reinsert the bad job here - now it's safe as
391 	 * drm_sched_get_cleanup_job cannot race against us and release the
392 	 * bad job at this point - we parked (waited for) any in progress
393 	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
394 	 * now until the scheduler thread is unparked.
395 	 */
396 	if (bad && bad->sched == sched)
397 		/*
398 		 * Add at the head of the queue to reflect it was the earliest
399 		 * job extracted.
400 		 */
401 		list_add(&bad->node, &sched->ring_mirror_list);
402 
403 	/*
404 	 * Iterate the job list from later to earlier and either deactivate
405 	 * the jobs' HW callbacks or remove them from the mirror list if they
406 	 * have already signaled.
407 	 * This iteration is thread safe as the sched thread is stopped.
408 	 */
409 	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
410 		if (s_job->s_fence->parent &&
411 		    dma_fence_remove_callback(s_job->s_fence->parent,
412 					      &s_job->cb)) {
413 			atomic_dec(&sched->hw_rq_count);
414 		} else {
415 			/*
416 			 * Remove the job from the ring_mirror_list.
417 			 * Locking here protects against a concurrent resume timeout.
418 			 */
419 			spin_lock(&sched->job_list_lock);
420 			list_del_init(&s_job->node);
421 			spin_unlock(&sched->job_list_lock);
422 
423 			/*
424 			 * Wait for job's HW fence callback to finish using s_job
425 			 * before releasing it.
426 			 *
427 			 * The job is still alive, so the fence refcount is at least 1.
428 			 */
429 			dma_fence_wait(&s_job->s_fence->finished, false);
430 
431 			/*
432 			 * We must keep the bad job alive for later use during
433 			 * recovery by some of the drivers, but leave a hint
434 			 * that the guilty job must be released.
435 			 */
436 			if (bad != s_job)
437 				sched->ops->free_job(s_job);
438 			else
439 				sched->free_guilty = true;
440 		}
441 	}
442 
443 	/*
444 	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
445 	 * avoids the pending timeout work in progress from firing right away after
446 	 * this TDR finished and before the newly restarted jobs have had a
447 	 * chance to complete.
448 	 */
449 	cancel_delayed_work(&sched->work_tdr);
450 }
451 
452 EXPORT_SYMBOL(drm_sched_stop);
453 
454 /**
455  * drm_sched_start - recover jobs after a reset
456  *
457  * @sched: scheduler instance
458  * @full_recovery: proceed with complete sched restart
459  *
460  */
461 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
462 {
463 	struct drm_sched_job *s_job, *tmp;
464 	int r;
465 
466 	/*
467 	 * Locking the list is not required here as the sched thread is parked
468 	 * so no new jobs are being inserted or removed. Also, concurrent
469 	 * GPU recoveries can't run in parallel.
470 	 */
471 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
472 		struct dma_fence *fence = s_job->s_fence->parent;
473 
474 		atomic_inc(&sched->hw_rq_count);
475 
476 		if (!full_recovery)
477 			continue;
478 
479 		if (fence) {
480 			r = dma_fence_add_callback(fence, &s_job->cb,
481 						   drm_sched_process_job);
482 			if (r == -ENOENT)
483 				drm_sched_process_job(fence, &s_job->cb);
484 			else if (r)
485 				DRM_ERROR("fence add callback failed (%d)\n",
486 					  r);
487 		} else
488 			drm_sched_process_job(NULL, &s_job->cb);
489 	}
490 
491 	if (full_recovery) {
492 		spin_lock(&sched->job_list_lock);
493 		drm_sched_start_timeout(sched);
494 		spin_unlock(&sched->job_list_lock);
495 	}
496 
497 	kthread_unpark(sched->thread);
498 }
499 EXPORT_SYMBOL(drm_sched_start);
500 
501 /**
502  * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
503  *
504  * @sched: scheduler instance
505  *
506  */
507 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
508 {
509 	struct drm_sched_job *s_job, *tmp;
510 	uint64_t guilty_context;
511 	bool found_guilty = false;
512 	struct dma_fence *fence;
513 
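	/*
	 * Mark the finished fences of all jobs from the guilty context with
	 * -ECANCELED, then ask the backend to run each remaining job again,
	 * replacing the old HW parent fence with the new one.
	 */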
514 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
515 		struct drm_sched_fence *s_fence = s_job->s_fence;
516 
517 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
518 			found_guilty = true;
519 			guilty_context = s_job->s_fence->scheduled.context;
520 		}
521 
522 		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
523 			dma_fence_set_error(&s_fence->finished, -ECANCELED);
524 
525 		dma_fence_put(s_job->s_fence->parent);
526 		fence = sched->ops->run_job(s_job);
527 
528 		if (IS_ERR_OR_NULL(fence)) {
529 			if (IS_ERR(fence))
530 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
531 
532 			s_job->s_fence->parent = NULL;
533 		} else {
534 			s_job->s_fence->parent = fence;
535 		}
536 
537 
538 	}
539 }
540 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
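/*
 * Illustrative sketch only: drm_sched_stop(), drm_sched_increase_karma(),
 * drm_sched_resubmit_jobs() and drm_sched_start() are typically used together
 * from a driver's timedout_job callback, roughly in this order; the hardware
 * reset step (my_hw_reset here) is driver specific:
 *
 *	drm_sched_stop(sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *
 *	my_hw_reset();
 *
 *	drm_sched_resubmit_jobs(sched);
 *	drm_sched_start(sched, true);
 */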
541 
542 /**
543  * drm_sched_job_init - init a scheduler job
544  *
545  * @job: scheduler job to init
546  * @entity: scheduler entity to use
547  * @owner: job owner for debugging
548  *
549  * Refer to drm_sched_entity_push_job() documentation
550  * for locking considerations.
551  *
552  * Returns 0 for success, negative error code otherwise.
553  */
554 int drm_sched_job_init(struct drm_sched_job *job,
555 		       struct drm_sched_entity *entity,
556 		       void *owner)
557 {
558 	struct drm_gpu_scheduler *sched;
559 
560 	drm_sched_entity_select_rq(entity);
561 	if (!entity->rq)
562 		return -ENOENT;
563 
564 	sched = entity->rq->sched;
565 
566 	job->sched = sched;
567 	job->entity = entity;
568 	job->s_priority = entity->rq - sched->sched_rq;
569 	job->s_fence = drm_sched_fence_create(entity, owner);
570 	if (!job->s_fence)
571 		return -ENOMEM;
572 	job->id = atomic64_inc_return(&sched->job_id_count);
573 
574 	INIT_LIST_HEAD(&job->node);
575 
576 	return 0;
577 }
578 EXPORT_SYMBOL(drm_sched_job_init);
579 
580 /**
581  * drm_sched_job_cleanup - clean up scheduler job resources
582  *
583  * @job: scheduler job to clean up
584  */
585 void drm_sched_job_cleanup(struct drm_sched_job *job)
586 {
587 	dma_fence_put(&job->s_fence->finished);
588 	job->s_fence = NULL;
589 }
590 EXPORT_SYMBOL(drm_sched_job_cleanup);
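/*
 * Illustrative sketch only: drm_sched_job_cleanup() is the counterpart of
 * drm_sched_job_init(). Drivers typically call it from their free_job
 * callback, and also when submission is aborted after a successful
 * drm_sched_job_init() but before the job was pushed to an entity
 * (my_prepare_job below is a hypothetical driver step):
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *
 *	r = my_prepare_job(job);
 *	if (r) {
 *		drm_sched_job_cleanup(&job->base);
 *		return r;
 *	}
 *
 *	drm_sched_entity_push_job(&job->base, entity);
 */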
591 
592 /**
593  * drm_sched_ready - is the scheduler ready
594  *
595  * @sched: scheduler instance
596  *
597  * Return true if we can push more jobs to the hw, otherwise false.
598  */
599 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
600 {
601 	return atomic_read(&sched->hw_rq_count) <
602 		sched->hw_submission_limit;
603 }
604 
605 /**
606  * drm_sched_wakeup - Wake up the scheduler when it is ready
607  *
608  * @sched: scheduler instance
609  *
610  */
611 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
612 {
613 	if (drm_sched_ready(sched))
614 		wake_up_interruptible(&sched->wake_up_worker);
615 }
616 
617 /**
618  * drm_sched_select_entity - Select next entity to process
619  *
620  * @sched: scheduler instance
621  *
622  * Returns the entity to process or NULL if none are found.
623  */
624 static struct drm_sched_entity *
625 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
626 {
627 	struct drm_sched_entity *entity;
628 	int i;
629 
630 	if (!drm_sched_ready(sched))
631 		return NULL;
632 
633 	/* Kernel run queue has higher priority than normal run queue */
634 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
635 		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
636 		if (entity)
637 			break;
638 	}
639 
640 	return entity;
641 }
642 
643 /**
644  * drm_sched_process_job - process a job
645  *
646  * @f: fence
647  * @cb: fence callbacks
648  *
649  * Called after job has finished execution.
650  */
651 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
652 {
653 	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
654 	struct drm_sched_fence *s_fence = s_job->s_fence;
655 	struct drm_gpu_scheduler *sched = s_fence->sched;
656 
657 	atomic_dec(&sched->hw_rq_count);
658 	atomic_dec(&sched->score);
659 
660 	trace_drm_sched_process_job(s_fence);
661 
662 	dma_fence_get(&s_fence->finished);
663 	drm_sched_fence_finished(s_fence);
664 	dma_fence_put(&s_fence->finished);
665 	wake_up_interruptible(&sched->wake_up_worker);
666 }
667 
668 /**
669  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
670  *
671  * @sched: scheduler instance
672  *
673  * Returns the next finished job from the mirror list (if there is one)
674  * ready for it to be destroyed.
675  */
676 static struct drm_sched_job *
677 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
678 {
679 	struct drm_sched_job *job;
680 
681 	/*
682 	 * Don't destroy jobs while the timeout worker is running OR the thread
683 	 * is being parked and hence assumed to not touch the ring_mirror_list
684 	 */
685 	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
686 	    !cancel_delayed_work(&sched->work_tdr)) ||
687 	    kthread_should_park())
688 		return NULL;
689 
690 	spin_lock(&sched->job_list_lock);
691 
692 	job = list_first_entry_or_null(&sched->ring_mirror_list,
693 				       struct drm_sched_job, node);
694 
695 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
696 		/* remove job from ring_mirror_list */
697 		list_del_init(&job->node);
698 	} else {
699 		job = NULL;
700 		/* queue timeout for next job */
701 		drm_sched_start_timeout(sched);
702 	}
703 
704 	spin_unlock(&sched->job_list_lock);
705 
706 	return job;
707 }
708 
709 /**
710  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
711  * @sched_list: list of drm_gpu_schedulers
712  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
713  *
714  * Returns a pointer to the sched with the least load, or NULL if none of
715  * the drm_gpu_schedulers are ready.
716  */
717 struct drm_gpu_scheduler *
718 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
719 		     unsigned int num_sched_list)
720 {
721 	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
722 	int i;
723 	unsigned int min_score = UINT_MAX, num_score;
724 
725 	for (i = 0; i < num_sched_list; ++i) {
726 		sched = sched_list[i];
727 
728 		if (!sched->ready) {
729 			DRM_WARN("scheduler %s is not ready, skipping",
730 				 sched->name);
731 			continue;
732 		}
733 
734 		num_score = atomic_read(&sched->score);
735 		if (num_score < min_score) {
736 			min_score = num_score;
737 			picked_sched = sched;
738 		}
739 	}
740 
741 	return picked_sched;
742 }
743 EXPORT_SYMBOL(drm_sched_pick_best);
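/*
 * Note (editorial): this is the load-balancing helper used when an entity was
 * initialized with more than one scheduler in its sched_list; the entity code
 * in sched_entity.c picks the least loaded scheduler for the next job when it
 * selects a run queue.
 */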
744 
745 /**
746  * drm_sched_blocked - check if the scheduler is blocked
747  *
748  * @sched: scheduler instance
749  *
750  * Returns true if blocked, otherwise false.
751  */
752 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
753 {
754 	if (kthread_should_park()) {
755 		kthread_parkme();
756 		return true;
757 	}
758 
759 	return false;
760 }
761 
762 /**
763  * drm_sched_main - main scheduler thread
764  *
765  * @param: scheduler instance
766  *
767  * Returns 0.
768  */
769 static int drm_sched_main(void *param)
770 {
771 	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
772 	int r;
773 
774 #ifdef __linux__
775 	sched_set_fifo_low(current);
776 #endif
777 
778 	while (!kthread_should_stop()) {
779 		struct drm_sched_entity *entity = NULL;
780 		struct drm_sched_fence *s_fence;
781 		struct drm_sched_job *sched_job;
782 		struct dma_fence *fence;
783 		struct drm_sched_job *cleanup_job = NULL;
784 
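		/*
		 * Sleep until there is a finished job to clean up, a ready
		 * entity to run (unless we are being parked), or the thread
		 * is asked to stop.
		 */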
785 		wait_event_interruptible(sched->wake_up_worker,
786 					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
787 					 (!drm_sched_blocked(sched) &&
788 					  (entity = drm_sched_select_entity(sched))) ||
789 					 kthread_should_stop());
790 
791 		if (cleanup_job) {
792 			sched->ops->free_job(cleanup_job);
793 			/* queue timeout for next job */
794 			drm_sched_start_timeout(sched);
795 		}
796 
797 		if (!entity)
798 			continue;
799 
800 		sched_job = drm_sched_entity_pop_job(entity);
801 
802 		complete(&entity->entity_idle);
803 
804 		if (!sched_job)
805 			continue;
806 
807 		s_fence = sched_job->s_fence;
808 
809 		atomic_inc(&sched->hw_rq_count);
810 		drm_sched_job_begin(sched_job);
811 
812 		trace_drm_run_job(sched_job, entity);
813 		fence = sched->ops->run_job(sched_job);
814 		drm_sched_fence_scheduled(s_fence);
815 
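		/*
		 * If the backend returned a HW fence, signal our finished
		 * fence from its callback; otherwise complete the job
		 * immediately, propagating any error from run_job.
		 */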
816 		if (!IS_ERR_OR_NULL(fence)) {
817 			s_fence->parent = dma_fence_get(fence);
818 			r = dma_fence_add_callback(fence, &sched_job->cb,
819 						   drm_sched_process_job);
820 			if (r == -ENOENT)
821 				drm_sched_process_job(fence, &sched_job->cb);
822 			else if (r)
823 				DRM_ERROR("fence add callback failed (%d)\n",
824 					  r);
825 			dma_fence_put(fence);
826 		} else {
827 			if (IS_ERR(fence))
828 				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
829 
830 			drm_sched_process_job(NULL, &sched_job->cb);
831 		}
832 
833 		wake_up(&sched->job_scheduled);
834 	}
835 	return 0;
836 }
837 
838 /**
839  * drm_sched_init - Init a gpu scheduler instance
840  *
841  * @sched: scheduler instance
842  * @ops: backend operations for this scheduler
843  * @hw_submission: number of hw submissions that can be in flight
844  * @hang_limit: number of times to allow a job to hang before dropping it
845  * @timeout: timeout value in jiffies for the scheduler
846  * @name: name used for debugging
847  *
848  * Return 0 on success, otherwise error code.
849  */
850 int drm_sched_init(struct drm_gpu_scheduler *sched,
851 		   const struct drm_sched_backend_ops *ops,
852 		   unsigned hw_submission,
853 		   unsigned hang_limit,
854 		   long timeout,
855 		   const char *name)
856 {
857 	int i, ret;
858 	sched->ops = ops;
859 	sched->hw_submission_limit = hw_submission;
860 	sched->name = name;
861 	sched->timeout = timeout;
862 	sched->hang_limit = hang_limit;
863 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
864 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
865 
866 	init_waitqueue_head(&sched->wake_up_worker);
867 	init_waitqueue_head(&sched->job_scheduled);
868 	INIT_LIST_HEAD(&sched->ring_mirror_list);
869 	mtx_init(&sched->job_list_lock, IPL_NONE);
870 	atomic_set(&sched->hw_rq_count, 0);
871 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
872 	atomic_set(&sched->score, 0);
873 	atomic64_set(&sched->job_id_count, 0);
874 
875 	/* Each scheduler will run on a separate kernel thread */
876 	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
877 	if (IS_ERR(sched->thread)) {
878 		ret = PTR_ERR(sched->thread);
879 		sched->thread = NULL;
880 		DRM_ERROR("Failed to create scheduler for %s.\n", name);
881 		return ret;
882 	}
883 
884 	sched->ready = true;
885 	return 0;
886 }
887 EXPORT_SYMBOL(drm_sched_init);
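/*
 * Illustrative sketch only: the drm_sched_backend_ops table passed to
 * drm_sched_init() is how a driver plugs its own code into the scheduler;
 * the my_* functions below are hypothetical driver callbacks:
 *
 *	static const struct drm_sched_backend_ops my_ops = {
 *		.dependency	= my_job_dependency,
 *		.run_job	= my_job_run,
 *		.timedout_job	= my_job_timedout,
 *		.free_job	= my_job_free,
 *	};
 */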
888 
889 /**
890  * drm_sched_fini - Destroy a gpu scheduler
891  *
892  * @sched: scheduler instance
893  *
894  * Tears down and cleans up the scheduler.
895  */
896 void drm_sched_fini(struct drm_gpu_scheduler *sched)
897 {
898 	struct drm_sched_entity *s_entity;
899 	int i;
900 
901 	if (sched->thread)
902 		kthread_stop(sched->thread);
903 
904 	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
905 		struct drm_sched_rq *rq = &sched->sched_rq[i];
906 
907 		if (!rq)
908 			continue;
909 
910 		spin_lock(&rq->lock);
911 		list_for_each_entry(s_entity, &rq->entities, list)
912 			/*
913 			 * Prevents reinsertion and marks job_queue as idle;
914 			 * it will be removed from the rq in drm_sched_entity_fini
915 			 * eventually.
916 			 */
917 			s_entity->stopped = true;
918 		spin_unlock(&rq->lock);
919 
920 	}
921 
922 	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
923 	wake_up_all(&sched->job_scheduled);
924 
925 	/* Confirm no work left behind accessing device structures */
926 	cancel_delayed_work_sync(&sched->work_tdr);
927 
928 	sched->ready = false;
929 }
930 EXPORT_SYMBOL(drm_sched_fini);
931