xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_irq.c (revision 5c389b79544373bccfce668b646e62e7ba9802a3)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by a module parameter then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */
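
/*
 * Rough call flow for an interrupt arriving on the primary IH ring
 * (a sketch of the code below; IH1, IH2 and the soft ring are drained
 * from work items instead, where handlers may sleep):
 *
 *	amdgpu_irq_handler()                      hard IRQ context
 *	  amdgpu_ih_process(adev, &adev->irq.ih)
 *	    amdgpu_irq_dispatch()                 decode IV, look up source
 *	      src->funcs->process()               per-IP handler
 */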

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector. After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	if (!adev->irq.installed)
		return IRQ_NONE;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	amdgpu_ras_interrupt_fatal_error_handler(adev);

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
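	/*
	 * amdgpu.msi module parameter: 1 forces MSIs on, 0 disables them;
	 * the default of -1 ("auto") falls through to the return below.
	 */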
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

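/*
 * amdgpu_restore_msix - restore the MSI-X enable bit after a reset
 *
 * Stubbed on OpenBSD; the disabled Linux path below toggles
 * PCI_MSIX_FLAGS_ENABLE in PCI config space to re-arm MSI-X after a
 * VF function level reset (FLR).
 */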
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
#endif
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up the work functions for the IH rings and display hotplug, enables
 * MSI functionality if not disabled, initializes vblank handling and
 * installs the interrupt handler.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	mtx_init(&adev->irq.lock, IPL_TTY);

#ifdef notyet
	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;

		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}
#endif

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED,
			adev_to_drm(adev)->driver->name, adev_to_drm(adev));
	if (r) {
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

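/**
 * amdgpu_irq_fini_hw - tear down hardware interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Frees the installed IRQ, releases MSI vectors if they were allocated,
 * flushes any pending hotplug work and tears down the IH rings.
 */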
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);

		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - shut down software interrupt state
 *
 * @adev: amdgpu device pointer
 *
 * Frees the IRQ source tables registered via amdgpu_irq_add_id() and the
 * per-type enable counters for all clients (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}
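
/*
 * Typical registration from an IP block, as a sketch; the source struct,
 * callbacks and source id here are illustrative, not defined in this file:
 *
 *	static const struct amdgpu_irq_src_funcs my_irq_funcs = {
 *		.set = my_irq_set,
 *		.process = my_irq_process,
 *	};
 *
 *	adev->my_block.irq.num_types = 1;
 *	adev->my_block.irq.funcs = &my_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, MY_SRC_ID,
 *			      &adev->my_block.irq);
 */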

/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Decodes the IV entry at the current read pointer and dispatches the IRQ
 * to the matching IP block handler.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
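	/* ih->rptr counts bytes and the IH ring is an array of u32s,
	 * so shift by two to index the current IV entry below.
	 */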
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		STUB();
#ifdef notyet
		generic_handle_domain_irq(adev->irq.domain, src_id);
#endif

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of the IV in dwords
 *
 * Delegates the IV to the soft IH ring and schedules processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates the interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the current state after taking the lock,
	 * otherwise we might disable an interrupt that was just enabled.
	 */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}
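
/*
 * amdgpu_irq_get() and amdgpu_irq_put() are reference counted per
 * interrupt type and must be balanced; the hardware state is only
 * touched on the first get and the last put. Usage sketch (the vblank
 * source used here is illustrative):
 *
 *	r = amdgpu_irq_get(adev, &adev->crtc_irq, crtc_id);
 *	...
 *	amdgpu_irq_put(adev, &adev->crtc_irq, crtc_id);
 */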

/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

#ifdef __linux__
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
#endif

#ifdef __linux__
/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * The current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
#endif

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
#ifdef __linux__
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}
#endif

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	STUB();
#if 0
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
#endif
}

/**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ (always 0 here, as the implementation is stubbed on OpenBSD)
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	STUB();
	return 0;
#if 0
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
#endif
}