/*	$NetBSD: amdgpu_fence.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $	*/

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_fence.c,v 1.3 2018/08/27 14:04:50 riastradh Exp $");

#include <asm/byteorder.h>
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
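
/*
 * Illustrative usage sketch (added here; not part of the original file):
 * a caller that submits work on a ring attaches a fence to the
 * submission and can later wait for the ring to drain, roughly (with
 * the appropriate locks held, and error handling omitted):
 *
 *	struct amdgpu_fence *fence;
 *
 *	r = amdgpu_fence_emit(ring, owner, &fence);
 *	...
 *	r = amdgpu_fence_wait_empty(ring);
 */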

static struct kmem_cache *amdgpu_fence_slab;
static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}

/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = lower_32_bits(atomic64_read(&drv->last_seq));

	return seq;
}

/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @owner: creator of the fence
 * @fence: amdgpu fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;

	/* we are protected by the ring emission mutex */
	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	(*fence)->seq = ++ring->fence_drv.sync_seq[ring->idx];
	(*fence)->ring = ring;
	(*fence)->owner = owner;
	fence_init(&(*fence)->base, &amdgpu_fence_ops,
#ifdef __NetBSD__
		&ring->fence_drv.fence_lock,
#else
		&ring->fence_drv.fence_queue.lock,
#endif
		adev->fence_context + ring->idx,
		(*fence)->seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       (*fence)->seq,
			       AMDGPU_FENCE_FLAG_INT);
	return 0;
}


/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}

/**
 * amdgpu_fence_activity - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signaled fence value. Returns true if activity occurred
 * on the ring, in which case the fence_queue should be woken up.
 */
static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update last_seq between the atomic read
	 * and the xchg of the current process.
	 *
	 * Moreover, for this to turn into an infinite loop, new fences
	 * must be signaled continuously, i.e. amdgpu_fence_read needs
	 * to return a different value each time for both the currently
	 * polling process and the other process that updates last_seq
	 * between the atomic read and xchg of the current process. And
	 * the value the other process sets as last_seq must be higher
	 * than the seq value we just read, which means the current
	 * process must be interrupted after amdgpu_fence_read and before
	 * the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * bail out after 10 loops, accepting the fact that we might have
	 * temporarily set last_seq not to the true last seq but to an
	 * older one.
	 */
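
	/*
	 * Illustrative worked example (added; not from the original
	 * source): suppose last_seq = 0x00000001fffffffe, the 32-bit
	 * hardware counter has since wrapped around to 0x00000003, and
	 * last_emitted = 0x0000000200000005.  Splicing the hardware
	 * value onto the upper 32 bits of last_seq gives
	 * 0x0000000100000003, which is less than last_seq, so the wrap
	 * is detected and the upper bits are taken from last_emitted
	 * instead, yielding the correct 64-bit sequence number
	 * 0x0000000200000003.
	 */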
	last_seq = atomic64_read(&ring->fence_drv.last_seq);
	do {
		last_emitted = ring->fence_drv.sync_seq[ring->idx];
		seq = amdgpu_fence_read(ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We have looped too many times; leave with the
			 * understanding that we might have set an older
			 * fence seq as signaled than the real last seq
			 * signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

	if (seq < last_emitted)
		amdgpu_fence_schedule_fallback(ring);

	return wake;
}

#ifdef __NetBSD__
static int amdgpu_fence_check_signaled(struct amdgpu_fence *);

static void
amdgpu_fence_wakeup_locked(struct amdgpu_ring *ring)
{
	struct amdgpu_fence *fence, *next;

	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));
	DRM_SPIN_WAKEUP_ALL(&ring->fence_drv.fence_queue,
	    &ring->fence_drv.fence_lock);
	TAILQ_FOREACH_SAFE(fence, &ring->fence_drv.fence_check, fence_check,
	    next) {
		amdgpu_fence_check_signaled(fence);
	}
}
#endif

/**
 * amdgpu_fence_process - process a fence
 *
 * @ring: ring the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
static void amdgpu_fence_process_locked(struct amdgpu_ring *ring)
{
	if (amdgpu_fence_activity(ring))
#ifdef __NetBSD__
		amdgpu_fence_wakeup_locked(ring);
#else
		wake_up_all(&ring->fence_drv.fence_queue);
#endif
}

void amdgpu_fence_process(struct amdgpu_ring *ring)
{

	spin_lock(&ring->fence_drv.fence_lock);
	amdgpu_fence_process_locked(ring);
	spin_unlock(&ring->fence_drv.fence_lock);
}

/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @arg: the ring, cast to an unsigned long (timer callback argument)
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(unsigned long arg)
{
	struct amdgpu_ring *ring = (void *)arg;

	amdgpu_fence_process(ring);
}

/**
 * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * amdgpu_fence_ring_wait_seq().
 */
static bool amdgpu_fence_seq_signaled(struct amdgpu_ring *ring, u64 seq)
{
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	/* poll new last sequence at least once */
	amdgpu_fence_process(ring);
	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return true;

	return false;
}

/*
 * amdgpu_fence_ring_wait_seq - wait for a specific ring's seq to signal
 * @ring: ring to wait on for the seq number
 * @seq: seq number to wait for
 *
 * return value:
 * 0: seq signaled, and GPU did not hang
 * -EDEADLK: GPU hang detected
 * -EINVAL: some parameter is not valid
 */
static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
{
	bool signaled = false;

	BUG_ON(!ring);
	BUG_ON(!spin_is_locked(&ring->fence_drv.fence_lock));
	if (seq > ring->fence_drv.sync_seq[ring->idx])
		return -EINVAL;

	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
		return 0;

	amdgpu_fence_schedule_fallback(ring);
#ifdef __NetBSD__
	/* XXX How is this ever supposed to wake up in the EDEADLK case?  */
	int r __unused;
	DRM_SPIN_WAIT_NOINTR_UNTIL(r, &ring->fence_drv.fence_queue,
	    &ring->fence_drv.fence_lock,
	    (signaled = amdgpu_fence_seq_signaled(ring, seq)));
#else
	wait_event(ring->fence_drv.fence_queue, (
		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
#endif

	if (signaled)
		return 0;
	else
		return -EDEADLK;
}

/**
 * amdgpu_fence_wait_next - wait for the next fence to signal
 *
 * @ring: ring to wait on
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_next(struct amdgpu_ring *ring)
{
	uint64_t seq = atomic64_read(&ring->fence_drv.last_seq) + 1ULL;

	if (seq >= ring->fence_drv.sync_seq[ring->idx])
		return -ENOENT;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring to wait on
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = ring->fence_drv.sync_seq[ring->idx];

	if (!seq)
		return 0;

	return amdgpu_fence_ring_wait_seq(ring, seq);
}

/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = ring->fence_drv.sync_seq[ring->idx]
		- atomic64_read(&ring->fence_drv.last_seq);
	/* to avoid 32-bit wrap-around */
	if (emitted > 0x10000000)
		emitted = 0x10000000;

	return (unsigned)emitted;
}
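
/*
 * Illustrative sketch (added; not part of the original file): dynpm-style
 * power-management code can treat the emitted count as a coarse
 * busyness signal, e.g.
 *
 *	bool busy = amdgpu_fence_count_emitted(ring) != 0;
 *
 * The 0x10000000 clamp above keeps the result sane if the 32-bit
 * hardware counter and the 64-bit bookkeeping briefly disagree.
 */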

/**
 * amdgpu_fence_need_sync - do we need a semaphore
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *fdrv;

	if (!fence)
		return false;

	if (fence->ring == dst_ring)
		return false;

	/* we are protected by the ring mutex */
	fdrv = &dst_ring->fence_drv;
	if (fence->seq <= fdrv->sync_seq[fence->ring->idx])
		return false;

	return true;
}

/**
 * amdgpu_fence_note_sync - record the sync point
 *
 * @fence: amdgpu fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *dst_ring)
{
	struct amdgpu_fence_driver *dst, *src;
	unsigned i;

	if (!fence)
		return;

	if (fence->ring == dst_ring)
		return;

	/* we are protected by the ring mutex */
	src = &fence->ring->fence_drv;
	dst = &dst_ring->fence_drv;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (i == dst_ring->idx)
			continue;

		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}

/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring != &adev->uvd.ring) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
#ifdef __NetBSD__		/* XXX ALIGN means something else.  */
		index = round_up(adev->uvd.fw->size, 8);
#else
		index = ALIGN(adev->uvd.fw->size, 8);
#endif
		ring->fence_drv.cpu_addr = (void *)((char *)adev->uvd.cpu_addr + index);
		ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic64_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016"PRIx64", "
		 "cpu addr 0x%p\n", ring->idx,
		 ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}

/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
	int i, r;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		ring->fence_drv.sync_seq[i] = 0;

	atomic64_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
		    (unsigned long)ring);

#ifdef __NetBSD__
	spin_lock_init(&ring->fence_drv.fence_lock);
	DRM_INIT_WAITQUEUE(&ring->fence_drv.fence_queue, "amdfence");
	TAILQ_INIT(&ring->fence_drv.fence_check);
#else
	init_waitqueue_head(&ring->fence_drv.fence_queue);
#endif

	if (amdgpu_enable_scheduler) {
		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
		if (timeout == 0) {
			/*
			 * FIXME:
			 * The delayed workqueue cannot take
			 * MAX_SCHEDULE_TIMEOUT directly, so the scheduler
			 * will not use a delayed workqueue when
			 * MAX_SCHEDULE_TIMEOUT is set.
			 * Keep it simple and silly for now.
			 */
			timeout = MAX_SCHEDULE_TIMEOUT;
		}
		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
				   amdgpu_sched_hw_submission,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create(
			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}

/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);
	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(adev);
		}
#ifdef __NetBSD__
		spin_lock(&ring->fence_drv.fence_lock);
		amdgpu_fence_wakeup_locked(ring);
		spin_unlock(&ring->fence_drv.fence_lock);
#else
		wake_up_all(&ring->fence_drv.fence_queue);
#endif
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		amd_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		ring->fence_drv.initialized = false;
#ifdef __NetBSD__
		BUG_ON(!TAILQ_EMPTY(&ring->fence_drv.fence_check));
		DRM_DESTROY_WAITQUEUE(&ring->fence_drv.fence_queue);
		spin_lock_destroy(&ring->fence_drv.fence_lock);
#endif
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(adev);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	mutex_lock(&adev->ring_lock);
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
	mutex_unlock(&adev->ring_lock);
}

/**
 * amdgpu_fence_driver_force_completion - force all fence waiters to complete
 *
 * @adev: amdgpu device pointer
 *
 * In case of GPU reset failure, make sure no process keeps waiting on a
 * fence that will never complete.
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_write(ring, ring->fence_drv.sync_seq[i]);
	}
}

/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}

/**
 * amdgpu_fence_is_signaled - test if fence is signaled
 *
 * @f: fence to test
 *
 * Test whether the fence's sequence number has already signaled.  If it
 * hasn't, start fence processing.  Returns true if the fence is signaled.
 */
static bool amdgpu_fence_is_signaled(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	amdgpu_fence_process(ring);

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return true;

	return false;
}

/**
 * amdgpu_fence_check_signaled - callback from fence_queue
 *
 * This function is called with the fence_queue lock held, which is also
 * used for the fence locking itself, so the variants that do not take
 * the lock themselves are used for fence_signal and remove_wait_queue.
 */
#ifdef __NetBSD__
static int amdgpu_fence_check_signaled(struct amdgpu_fence *fence)
#else
static int amdgpu_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
#endif
{
#ifndef __NetBSD__
	struct amdgpu_fence *fence;
#endif
	u64 seq;
	int ret;

#ifndef __NetBSD__
	fence = container_of(wait, struct amdgpu_fence, fence_wake);
#endif
	BUG_ON(!spin_is_locked(&fence->ring->fence_drv.fence_lock));

	/*
	 * We cannot use amdgpu_fence_process here because we're already
	 * in the waitqueue, in a call from wake_up_all.
	 */
	seq = atomic64_read(&fence->ring->fence_drv.last_seq);
	if (seq >= fence->seq) {
		ret = fence_signal_locked(&fence->base);
		if (!ret)
			FENCE_TRACE(&fence->base, "signaled from irq context\n");
		else
			FENCE_TRACE(&fence->base, "was already signaled\n");

#ifdef __NetBSD__
		TAILQ_REMOVE(&fence->ring->fence_drv.fence_check, fence,
		    fence_check);
#else
		__remove_wait_queue(&fence->ring->fence_drv.fence_queue, &fence->fence_wake);
#endif
		fence_put(&fence->base);
	} else
		FENCE_TRACE(&fence->base, "pending\n");
	return 0;
}


/**
 * amdgpu_fence_enable_signaling - enable signaling on fence
 * @fence: fence
 *
 * This function is called with the fence_queue lock held, and adds a
 * callback to fence_queue that checks if this fence is signaled, and if
 * so it signals the fence and removes itself.
 */
static bool amdgpu_fence_enable_signaling(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
		return false;

#ifdef __NetBSD__
	TAILQ_INSERT_TAIL(&ring->fence_drv.fence_check, fence, fence_check);
#else
	fence->fence_wake.flags = 0;
	fence->fence_wake.private = NULL;
	fence->fence_wake.func = amdgpu_fence_check_signaled;
	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
#endif
	fence_get(f);
	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);
	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
	return true;
}

static void amdgpu_fence_release(struct fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}

const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.signaled = amdgpu_fence_is_signaled,
	.wait = fence_default_wait,
	.release = amdgpu_fence_release,
};
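
/*
 * Rough lifecycle sketch (added; not from the original source): the
 * fence core consults these ops as follows.  A waiter first polls
 * .signaled (amdgpu_fence_is_signaled); if the fence is still pending,
 * the core calls .enable_signaling, which queues the fence on the
 * ring's fence_queue (on NetBSD, the fence_check list) and arms the
 * fallback timer.  A later interrupt or timer tick runs
 * amdgpu_fence_process, which signals the fence and drops the reference
 * taken in enable_signaling; .release then frees the fence back to the
 * slab.
 */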

/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i, j;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted        0x%016"PRIx64"\n",
			   ring->fence_drv.sync_seq[i]);

		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
			struct amdgpu_ring *other = adev->rings[j];
			if (i != j && other && other->fence_drv.initialized &&
			    ring->fence_drv.sync_seq[j])
				seq_printf(m, "Last sync to ring %d 0x%016"PRIx64"\n",
					   j, ring->fence_drv.sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 1);
#else
	return 0;
#endif
}
912