xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_irq.c (revision 1ad61ae0a79a724d2d3ec69e69c8e1d1ff6b53a0)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by a module parameter, then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */
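
/*
 * Illustrative summary of the dispatch flow described above (a sketch of
 * what this file implements, not extra hardware documentation):
 *
 *	hardware IRQ
 *	  -> amdgpu_irq_handler()               top-level PCI interrupt
 *	       -> amdgpu_ih_process()           walk the IH ring buffer
 *	            -> amdgpu_irq_dispatch()    decode the IV, look up client/src
 *	                 -> src->funcs->process()  IP-block handler; defers to
 *	                                           a work handler if it may sleep
 */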

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
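
/*
 * Note (added for clarity): the table above is indexed by the SOC15 IH
 * client id carried in each IV entry and is used to produce human-readable
 * names in interrupt tracing/debug output.
 */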

/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	if (!adev->irq.installed)
		return 0;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}
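
/*
 * Worked example (illustrative; the parameter syntax shown is the Linux
 * one): the amdgpu_msi module parameter is tri-state.  Booting with
 * amdgpu.msi=0 makes amdgpu_msi_ok() return false and the driver falls
 * back to legacy INTx interrupts; amdgpu.msi=1 forces MSI on; any other
 * value (the default) leaves MSI enabled.
 */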

static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
#endif
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work functions for hotplug and IH ring processing, enables
 * MSI functionality if available, and initializes vblank and hotplug
 * interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	mtx_init(&adev->irq.lock, IPL_TTY);

#ifdef notyet
	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}
		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}
#endif

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work,
				amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r) {
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini_hw - shut down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ, releases MSI vectors if they were used, and
 * tears down the IH rings (all ASICs).
 */
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);

		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down software interrupt state
 *
 * @adev: amdgpu device pointer
 *
 * Frees the enabled-type counters and the per-client IRQ source tables for
 * all registered sources (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
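
/*
 * Illustrative usage (a sketch, not code from this file): an IP block
 * registers its interrupt source during early init, pairing a client/source
 * id with an &amdgpu_irq_src whose funcs supply the set/process callbacks.
 * For example, identifiers as used by the VI GFX code:
 *
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
 *			      VISLANDS30_IV_SRCID_CP_END_OF_PIPE,
 *			      &adev->gfx.eop_irq);
 *	if (r)
 *		return r;
 */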

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		STUB();
#ifdef notyet
		generic_handle_domain_irq(adev->irq.domain, src_id);
#endif

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}
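
/*
 * Illustrative sketch of the process callback invoked above (hypothetical
 * names; real implementations live in the IP-specific files):
 *
 *	static int example_irq_process(struct amdgpu_device *adev,
 *				       struct amdgpu_irq_src *source,
 *				       struct amdgpu_iv_entry *entry)
 *	{
 *		// inspect entry->src_id / entry->src_data[] and either
 *		// handle the event here or hand it off to a work handler
 *		return 1;	// non-zero marks the IV as handled
 *	}
 */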

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates the interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
		      struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock; otherwise
	 * we might disable an interrupt that has just been enabled.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables the specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables the specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
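
/*
 * Illustrative usage (a sketch; identifiers as used by the pre-DC display
 * code): amdgpu_irq_get()/amdgpu_irq_put() are reference counted, so enables
 * and disables must be balanced, e.g. around a vblank-dependent operation:
 *
 *	amdgpu_irq_get(adev, &adev->crtc_irq, AMDGPU_CRTC_IRQ_VBLANK1 + crtc);
 *	...wait for the vblank event...
 *	amdgpu_irq_put(adev, &adev->crtc_irq, AMDGPU_CRTC_IRQ_VBLANK1 + crtc);
 */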

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

#ifdef __linux__
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
#endif

#ifdef __linux__
/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * The current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
#endif

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
#ifdef __linux__
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}
#endif

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	STUB();
#if 0
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
#endif
}

/**
 * amdgpu_irq_create_mapping - create a mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	STUB();
	return 0;
#if 0
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
#endif
}