xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_irq.c (revision f84b1df5a16cdd762c93854218de246e79975d3b)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * MSI support is enabled unless it has been disabled via module parameter.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 *
 * A sketch of how an IP block plugs into this dispatch path is shown below,
 * after the includes.
 */

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

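/*
 * Illustrative sketch only (kept under #if 0, following this file's
 * convention for non-live code): how an IP block typically plugs into the
 * dispatch path described in DOC: Interrupt Handling.  An IP block provides
 * &amdgpu_irq_src_funcs callbacks; amdgpu_irq_dispatch() invokes ->process
 * for the matching client/source id.  The my_block_* names are hypothetical.
 */
#if 0
static int my_block_set_irq_state(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *src,
				  unsigned type,
				  enum amdgpu_interrupt_state state)
{
	/* Program the hardware to enable/disable this interrupt type. */
	return 0;
}

static int my_block_process_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *src,
				struct amdgpu_iv_entry *entry)
{
	/* Fast, non-sleeping handling only; defer anything that sleeps. */
	DRM_DEBUG("my_block: IV src_id %u\n", entry->src_id);
	return 1;	/* non-zero tells the dispatcher it was handled */
}

static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
	.set = my_block_set_irq_state,
	.process = my_block_process_irq,
};
#endif
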
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};

/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * Hotplug event handling must be deferred from the IRQ handler to a work
 * handler because the hotplug handler has to use mutexes, which cannot be
 * locked in an IRQ handler (since &mutex_lock may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @arg: pointer to the DRM device
 *
 * IRQ handler for the amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = drm_to_adev(dev);
	irqreturn_t ret;

	if (!adev->irq.installed)
		return IRQ_NONE;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	/* For hardware that cannot enable the BIF ring for both the
	 * ras_controller_irq and ras_err_event_athub_irq IH cookies, the
	 * driver has to poll the status register to check whether the
	 * interrupt was triggered, and properly ack it if it was.
	 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
			adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);

		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
			adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
	}

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of the IH soft ring.
 */
static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih_soft_work);

	amdgpu_ih_process(adev, &adev->irq.ih_soft);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

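/*
 * Note on the module parameter: upstream amdgpu registers "msi" as a
 * tri-state option (-1 = auto, the default; 0 = disable; 1 = enable), so on
 * Linux MSI can be forced off with e.g. "modprobe amdgpu msi=0".  This is
 * noted here for context only; the OpenBSD port stubs out the MSI vector
 * setup in amdgpu_irq_init() below.
 */
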
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
#endif
}

/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and IH processing, enables MSI
 * functionality, and initializes vblank and hotplug interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	mtx_init(&adev->irq.lock, IPL_TTY);

#ifdef notyet
	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0)
			flags = PCI_IRQ_MSI;
		else
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;

		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
	}
#endif

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED,
			adev_to_drm(adev)->driver->name, adev_to_drm(adev));
	if (r) {
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
	if (adev->irq.installed) {
		free_irq(adev->irq.irq, adev_to_drm(adev));
		adev->irq.installed = false;
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);

		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}

/**
 * amdgpu_irq_fini_sw - tear down software interrupt state
 *
 * @adev: amdgpu device pointer
 *
 * Frees the memory allocated for the IRQ sources of all clients
 * (all ASICs).
 */
void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}

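/*
 * Illustrative sketch only (hypothetical names, kept under #if 0): an IP
 * block would typically register a source like my_block_irq_funcs above
 * from its sw_init callback, sizing num_types to the interrupt types it
 * exposes.
 */
#if 0
static struct amdgpu_irq_src my_block_irq;

static int my_block_sw_init(struct amdgpu_device *adev)
{
	my_block_irq.num_types = 1;
	my_block_irq.funcs = &my_block_irq_funcs;

	/* Hypothetical src id; real blocks use ids from their IH headers. */
	return amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
				 42, &my_block_irq);
}
#endif
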
/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		STUB();
#ifdef notyet
		generic_handle_domain_irq(adev->irq.domain, src_id);
#endif

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}

/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}

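/*
 * Illustrative sketch only (hypothetical, kept under #if 0): a ->process
 * callback variant that cannot complete in interrupt context can copy its
 * IV into the soft ring and finish from the ih_soft work handler.  The
 * entry is re-dispatched through amdgpu_irq_dispatch() when
 * amdgpu_ih_process() drains &adev->irq.ih_soft.
 */
#if 0
static int my_block_process_deferred(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     struct amdgpu_iv_entry *entry)
{
	if (entry->ih != &adev->irq.ih_soft) {
		/* First pass, from hard IRQ context: defer. */
		amdgpu_irq_delegate(adev, entry, 32);
		return 1;
	}

	/* Second pass, from the work handler: sleeping is allowed here. */
	return 1;
}
#endif
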
/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates the interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock, otherwise
	   we might disable an interrupt that was just enabled. */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
		amdgpu_restore_msix(adev);

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs || !src->funcs->set)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->irq.installed)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

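/*
 * Illustrative sketch only (hypothetical names, kept under #if 0): enables
 * are reference counted per interrupt type, so amdgpu_irq_get() and
 * amdgpu_irq_put() must be balanced.  The hardware ->set callback only runs
 * on the 0 -> 1 and 1 -> 0 transitions of the per-type counter.
 */
#if 0
static int my_block_begin_use(struct amdgpu_device *adev)
{
	/* First caller flips the type on in hardware; later callers
	 * only bump the reference count. */
	return amdgpu_irq_get(adev, &my_block_irq, 0);
}

static void my_block_end_use(struct amdgpu_device *adev)
{
	/* Last caller turns the type back off in hardware. */
	amdgpu_irq_put(adev, &my_block_irq, 0);
}
#endif
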
/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->irq.installed)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

#ifdef __linux__
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
#endif

#ifdef __linux__
/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * Current implementation assigns simple interrupt handler to the given virtual
 * IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
#endif

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
#ifdef __linux__
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}
#endif

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
	STUB();
#if 0
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
#endif
}

/**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
	STUB();
	return 0;
#if 0
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
#endif
}