xref: /openbsd-src/sys/dev/pci/drm/amd/amdgpu/amdgpu_irq.c (revision 78fec973f57e9fc9edd564490c79661460ad807b)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 
29 /**
30  * DOC: Interrupt Handling
31  *
32  * Interrupts generated within GPU hardware raise interrupt requests that are
33  * passed to amdgpu IRQ handler which is responsible for detecting source and
34  * type of the interrupt and dispatching matching handlers. If handling an
35  * interrupt requires calling kernel functions that may sleep processing is
36  * dispatched to work handlers.
37  *
38  * If MSI functionality is not disabled by module parameter then MSI
39  * support will be enabled.
40  *
41  * For GPU interrupt sources that may be driven by another driver, IRQ domain
42  * support is used (with mapping between virtual and hardware IRQs).
43  */
44 
45 #include <linux/irq.h>
46 #include <linux/pci.h>
47 
48 #include <drm/drm_crtc_helper.h>
49 #include <drm/drm_vblank.h>
50 #include <drm/amdgpu_drm.h>
51 #include <drm/drm_drv.h>
52 #include "amdgpu.h"
53 #include "amdgpu_ih.h"
54 #include "atom.h"
55 #include "amdgpu_connectors.h"
56 #include "amdgpu_trace.h"
57 #include "amdgpu_amdkfd.h"
58 #include "amdgpu_ras.h"
59 
60 #include <linux/pm_runtime.h>
61 
62 #ifdef CONFIG_DRM_AMD_DC
63 #include "amdgpu_dm_irq.h"
64 #endif
65 
66 #define AMDGPU_WAIT_IDLE_TIMEOUT 200
67 
/*
 * Human-readable names for SOC15 IH client IDs, used when decoding and
 * logging interrupt vectors. Presumably indexed by the hardware client id
 * (TODO confirm against the SOC15 client-id enum); several slots are shared
 * by different IP blocks depending on the ASIC ("X or Y" entries).
 */
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
102 
103 /**
104  * amdgpu_hotplug_work_func - work handler for display hotplug event
105  *
106  * @work: work struct pointer
107  *
108  * This is the hotplug event work handler (all ASICs).
109  * The work gets scheduled from the IRQ handler if there
110  * was a hotplug interrupt.  It walks through the connector table
111  * and calls hotplug handler for each connector. After this, it sends
112  * a DRM hotplug event to alert userspace.
113  *
114  * This design approach is required in order to defer hotplug event handling
115  * from the IRQ handler to a work handler because hotplug handler has to use
116  * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
117  * sleep).
118  */
119 static void amdgpu_hotplug_work_func(struct work_struct *work)
120 {
121 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
122 						  hotplug_work);
123 	struct drm_device *dev = adev_to_drm(adev);
124 	struct drm_mode_config *mode_config = &dev->mode_config;
125 	struct drm_connector *connector;
126 	struct drm_connector_list_iter iter;
127 
128 	mutex_lock(&mode_config->mutex);
129 	drm_connector_list_iter_begin(dev, &iter);
130 	drm_for_each_connector_iter(connector, &iter)
131 		amdgpu_connector_hotplug(connector);
132 	drm_connector_list_iter_end(&iter);
133 	mutex_unlock(&mode_config->mutex);
134 	/* Just fire off a uevent and let userspace tell us what to do */
135 	drm_helper_hpd_irq_event(dev);
136 }
137 
138 /**
139  * amdgpu_irq_disable_all - disable *all* interrupts
140  *
141  * @adev: amdgpu device pointer
142  *
143  * Disable all types of interrupts from all sources.
144  */
145 void amdgpu_irq_disable_all(struct amdgpu_device *adev)
146 {
147 	unsigned long irqflags;
148 	unsigned i, j, k;
149 	int r;
150 
151 	spin_lock_irqsave(&adev->irq.lock, irqflags);
152 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
153 		if (!adev->irq.client[i].sources)
154 			continue;
155 
156 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
157 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
158 
159 			if (!src || !src->funcs->set || !src->num_types)
160 				continue;
161 
162 			for (k = 0; k < src->num_types; ++k) {
163 				atomic_set(&src->enabled_types[k], 0);
164 				r = src->funcs->set(adev, src, k,
165 						    AMDGPU_IRQ_STATE_DISABLE);
166 				if (r)
167 					DRM_ERROR("error disabling interrupt (%d)\n",
168 						  r);
169 			}
170 		}
171 	}
172 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
173 }
174 
175 /**
176  * amdgpu_irq_handler - IRQ handler
177  *
178  * @irq: IRQ number (unused)
179  * @arg: pointer to DRM device
180  *
181  * IRQ handler for amdgpu driver (all ASICs).
182  *
183  * Returns:
184  * result of handling the IRQ, as defined by &irqreturn_t
185  */
186 irqreturn_t amdgpu_irq_handler(void *arg)
187 {
188 	struct drm_device *dev = (struct drm_device *) arg;
189 	struct amdgpu_device *adev = drm_to_adev(dev);
190 	irqreturn_t ret;
191 
192 	if (!adev->irq.installed)
193 		return 0;
194 
195 	ret = amdgpu_ih_process(adev, &adev->irq.ih);
196 	if (ret == IRQ_HANDLED)
197 		pm_runtime_mark_last_busy(dev->dev);
198 
199 	/* For the hardware that cannot enable bif ring for both ras_controller_irq
200          * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status
201 	 * register to check whether the interrupt is triggered or not, and properly
202 	 * ack the interrupt if it is there
203 	 */
204 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
205 		if (adev->nbio.ras_funcs &&
206 		    adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
207 			adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
208 
209 		if (adev->nbio.ras_funcs &&
210 		    adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
211 			adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
212 	}
213 
214 	return ret;
215 }
216 
217 /**
218  * amdgpu_irq_handle_ih1 - kick of processing for IH1
219  *
220  * @work: work structure in struct amdgpu_irq
221  *
222  * Kick of processing IH ring 1.
223  */
224 static void amdgpu_irq_handle_ih1(struct work_struct *work)
225 {
226 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
227 						  irq.ih1_work);
228 
229 	amdgpu_ih_process(adev, &adev->irq.ih1);
230 }
231 
232 /**
233  * amdgpu_irq_handle_ih2 - kick of processing for IH2
234  *
235  * @work: work structure in struct amdgpu_irq
236  *
237  * Kick of processing IH ring 2.
238  */
239 static void amdgpu_irq_handle_ih2(struct work_struct *work)
240 {
241 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
242 						  irq.ih2_work);
243 
244 	amdgpu_ih_process(adev, &adev->irq.ih2);
245 }
246 
247 /**
248  * amdgpu_irq_handle_ih_soft - kick of processing for ih_soft
249  *
250  * @work: work structure in struct amdgpu_irq
251  *
252  * Kick of processing IH soft ring.
253  */
254 static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
255 {
256 	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
257 						  irq.ih_soft_work);
258 
259 	amdgpu_ih_process(adev, &adev->irq.ih_soft);
260 }
261 
262 /**
263  * amdgpu_msi_ok - check whether MSI functionality is enabled
264  *
265  * @adev: amdgpu device pointer (unused)
266  *
267  * Checks whether MSI functionality has been disabled via module parameter
268  * (all ASICs).
269  *
270  * Returns:
271  * *true* if MSIs are allowed to be enabled or *false* otherwise
272  */
273 bool amdgpu_msi_ok(struct amdgpu_device *adev)
274 {
275 	if (amdgpu_msi == 1)
276 		return true;
277 	else if (amdgpu_msi == 0)
278 		return false;
279 
280 	return true;
281 }
282 
/*
 * amdgpu_restore_msix - re-arm MSI-X after a VF function-level reset.
 *
 * Stubbed on OpenBSD (STUB/notyet): the Linux implementation re-toggles
 * the MSI-X enable bit in PCI config space if it was enabled, which
 * restores MSI-X delivery after a VF FLR.
 */
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	STUB();
#ifdef notyet
	u16 ctrl;

	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	/* Nothing to restore if MSI-X was never enabled. */
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
#endif
}
300 
301 /**
302  * amdgpu_irq_init - initialize interrupt handling
303  *
304  * @adev: amdgpu device pointer
305  *
306  * Sets up work functions for hotplug and reset interrupts, enables MSI
307  * functionality, initializes vblank, hotplug and reset interrupt handling.
308  *
309  * Returns:
310  * 0 on success or error code on failure
311  */
312 int amdgpu_irq_init(struct amdgpu_device *adev)
313 {
314 	int r = 0;
315 	unsigned int irq;
316 
317 	mtx_init(&adev->irq.lock, IPL_TTY);
318 
319 #ifdef notyet
320 	/* Enable MSI if not disabled by module parameter */
321 	adev->irq.msi_enabled = false;
322 
323 	if (amdgpu_msi_ok(adev)) {
324 		int nvec = pci_msix_vec_count(adev->pdev);
325 		unsigned int flags;
326 
327 		if (nvec <= 0) {
328 			flags = PCI_IRQ_MSI;
329 		} else {
330 			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
331 		}
332 		/* we only need one vector */
333 		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
334 		if (nvec > 0) {
335 			adev->irq.msi_enabled = true;
336 			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
337 		}
338 	}
339 #endif
340 
341 	if (!amdgpu_device_has_dc_support(adev)) {
342 		if (!adev->enable_virtual_display)
343 			/* Disable vblank IRQs aggressively for power-saving */
344 			/* XXX: can this be enabled for DC? */
345 			adev_to_drm(adev)->vblank_disable_immediate = true;
346 
347 		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
348 		if (r)
349 			return r;
350 
351 		/* Pre-DCE11 */
352 		INIT_WORK(&adev->hotplug_work,
353 				amdgpu_hotplug_work_func);
354 	}
355 
356 	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
357 	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
358 	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
359 
360 	/* Use vector 0 for MSI-X. */
361 	r = pci_irq_vector(adev->pdev, 0);
362 	if (r < 0)
363 		return r;
364 	irq = r;
365 
366 	/* PCI devices require shared interrupts. */
367 	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
368 			adev_to_drm(adev));
369 	if (r) {
370 		if (!amdgpu_device_has_dc_support(adev))
371 			flush_work(&adev->hotplug_work);
372 		return r;
373 	}
374 	adev->irq.installed = true;
375 	adev->irq.irq = irq;
376 	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
377 
378 	DRM_DEBUG("amdgpu: irq initialized.\n");
379 	return 0;
380 }
381 
382 
383 void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
384 {
385 	if (adev->irq.installed) {
386 		free_irq(adev->irq.irq, adev_to_drm(adev));
387 		adev->irq.installed = false;
388 		if (adev->irq.msi_enabled)
389 			pci_free_irq_vectors(adev->pdev);
390 
391 		if (!amdgpu_device_has_dc_support(adev))
392 			flush_work(&adev->hotplug_work);
393 	}
394 
395 	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
396 	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
397 	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
398 	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
399 }
400 
401 /**
402  * amdgpu_irq_fini - shut down interrupt handling
403  *
404  * @adev: amdgpu device pointer
405  *
406  * Tears down work functions for hotplug and reset interrupts, disables MSI
407  * functionality, shuts down vblank, hotplug and reset interrupt handling,
408  * turns off interrupts from all sources (all ASICs).
409  */
410 void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
411 {
412 	unsigned i, j;
413 
414 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
415 		if (!adev->irq.client[i].sources)
416 			continue;
417 
418 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
419 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
420 
421 			if (!src)
422 				continue;
423 
424 			kfree(src->enabled_types);
425 			src->enabled_types = NULL;
426 		}
427 		kfree(adev->irq.client[i].sources);
428 		adev->irq.client[i].sources = NULL;
429 	}
430 }
431 
432 /**
433  * amdgpu_irq_add_id - register IRQ source
434  *
435  * @adev: amdgpu device pointer
436  * @client_id: client id
437  * @src_id: source id
438  * @source: IRQ source pointer
439  *
440  * Registers IRQ source on a client.
441  *
442  * Returns:
443  * 0 on success or error code otherwise
444  */
445 int amdgpu_irq_add_id(struct amdgpu_device *adev,
446 		      unsigned client_id, unsigned src_id,
447 		      struct amdgpu_irq_src *source)
448 {
449 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
450 		return -EINVAL;
451 
452 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
453 		return -EINVAL;
454 
455 	if (!source->funcs)
456 		return -EINVAL;
457 
458 	if (!adev->irq.client[client_id].sources) {
459 		adev->irq.client[client_id].sources =
460 			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
461 				sizeof(struct amdgpu_irq_src *),
462 				GFP_KERNEL);
463 		if (!adev->irq.client[client_id].sources)
464 			return -ENOMEM;
465 	}
466 
467 	if (adev->irq.client[client_id].sources[src_id] != NULL)
468 		return -EINVAL;
469 
470 	if (source->num_types && !source->enabled_types) {
471 		atomic_t *types;
472 
473 		types = kcalloc(source->num_types, sizeof(atomic_t),
474 				GFP_KERNEL);
475 		if (!types)
476 			return -ENOMEM;
477 
478 		source->enabled_types = types;
479 	}
480 
481 	adev->irq.client[client_id].sources[src_id] = source;
482 	return 0;
483 }
484 
485 /**
486  * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
487  *
488  * @adev: amdgpu device pointer
489  * @ih: interrupt ring instance
490  *
491  * Dispatches IRQ to IP blocks.
492  */
493 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
494 			 struct amdgpu_ih_ring *ih)
495 {
496 	u32 ring_index = ih->rptr >> 2;
497 	struct amdgpu_iv_entry entry;
498 	unsigned client_id, src_id;
499 	struct amdgpu_irq_src *src;
500 	bool handled = false;
501 	int r;
502 
503 	entry.ih = ih;
504 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
505 	amdgpu_ih_decode_iv(adev, &entry);
506 
507 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
508 
509 	client_id = entry.client_id;
510 	src_id = entry.src_id;
511 
512 	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
513 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
514 
515 	} else	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
516 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
517 
518 	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
519 		   adev->irq.virq[src_id]) {
520 		STUB();
521 #ifdef notyet
522 		generic_handle_domain_irq(adev->irq.domain, src_id);
523 #endif
524 
525 	} else if (!adev->irq.client[client_id].sources) {
526 		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
527 			  client_id, src_id);
528 
529 	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
530 		r = src->funcs->process(adev, src, &entry);
531 		if (r < 0)
532 			DRM_ERROR("error processing interrupt (%d)\n", r);
533 		else if (r)
534 			handled = true;
535 
536 	} else {
537 		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
538 	}
539 
540 	/* Send it to amdkfd as well if it isn't already handled */
541 	if (!handled)
542 		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
543 }
544 
545 /**
546  * amdgpu_irq_delegate - delegate IV to soft IH ring
547  *
548  * @adev: amdgpu device pointer
549  * @entry: IV entry
550  * @num_dw: size of IV
551  *
552  * Delegate the IV to the soft IH ring and schedule processing of it. Used
553  * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
554  */
555 void amdgpu_irq_delegate(struct amdgpu_device *adev,
556 			 struct amdgpu_iv_entry *entry,
557 			 unsigned int num_dw)
558 {
559 	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
560 	schedule_work(&adev->irq.ih_soft_work);
561 }
562 
563 /**
564  * amdgpu_irq_update - update hardware interrupt state
565  *
566  * @adev: amdgpu device pointer
567  * @src: interrupt source pointer
568  * @type: type of interrupt
569  *
570  * Updates interrupt state for the specific source (all ASICs).
571  */
572 int amdgpu_irq_update(struct amdgpu_device *adev,
573 			     struct amdgpu_irq_src *src, unsigned type)
574 {
575 	unsigned long irqflags;
576 	enum amdgpu_interrupt_state state;
577 	int r;
578 
579 	spin_lock_irqsave(&adev->irq.lock, irqflags);
580 
581 	/* We need to determine after taking the lock, otherwise
582 	   we might disable just enabled interrupts again */
583 	if (amdgpu_irq_enabled(adev, src, type))
584 		state = AMDGPU_IRQ_STATE_ENABLE;
585 	else
586 		state = AMDGPU_IRQ_STATE_DISABLE;
587 
588 	r = src->funcs->set(adev, src, type, state);
589 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
590 	return r;
591 }
592 
593 /**
594  * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
595  *
596  * @adev: amdgpu device pointer
597  *
598  * Updates state of all types of interrupts on all sources on resume after
599  * reset.
600  */
601 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
602 {
603 	int i, j, k;
604 
605 	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
606 		amdgpu_restore_msix(adev);
607 
608 	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
609 		if (!adev->irq.client[i].sources)
610 			continue;
611 
612 		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
613 			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
614 
615 			if (!src || !src->funcs || !src->funcs->set)
616 				continue;
617 			for (k = 0; k < src->num_types; k++)
618 				amdgpu_irq_update(adev, src, k);
619 		}
620 	}
621 }
622 
623 /**
624  * amdgpu_irq_get - enable interrupt
625  *
626  * @adev: amdgpu device pointer
627  * @src: interrupt source pointer
628  * @type: type of interrupt
629  *
630  * Enables specified type of interrupt on the specified source (all ASICs).
631  *
632  * Returns:
633  * 0 on success or error code otherwise
634  */
635 int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
636 		   unsigned type)
637 {
638 	if (!adev->irq.installed)
639 		return -ENOENT;
640 
641 	if (type >= src->num_types)
642 		return -EINVAL;
643 
644 	if (!src->enabled_types || !src->funcs->set)
645 		return -EINVAL;
646 
647 	if (atomic_inc_return(&src->enabled_types[type]) == 1)
648 		return amdgpu_irq_update(adev, src, type);
649 
650 	return 0;
651 }
652 
653 /**
654  * amdgpu_irq_put - disable interrupt
655  *
656  * @adev: amdgpu device pointer
657  * @src: interrupt source pointer
658  * @type: type of interrupt
659  *
660  * Enables specified type of interrupt on the specified source (all ASICs).
661  *
662  * Returns:
663  * 0 on success or error code otherwise
664  */
665 int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
666 		   unsigned type)
667 {
668 	if (!adev->irq.installed)
669 		return -ENOENT;
670 
671 	if (type >= src->num_types)
672 		return -EINVAL;
673 
674 	if (!src->enabled_types || !src->funcs->set)
675 		return -EINVAL;
676 
677 	if (atomic_dec_and_test(&src->enabled_types[type]))
678 		return amdgpu_irq_update(adev, src, type);
679 
680 	return 0;
681 }
682 
683 /**
684  * amdgpu_irq_enabled - check whether interrupt is enabled or not
685  *
686  * @adev: amdgpu device pointer
687  * @src: interrupt source pointer
688  * @type: type of interrupt
689  *
690  * Checks whether the given type of interrupt is enabled on the given source.
691  *
692  * Returns:
693  * *true* if interrupt is enabled, *false* if interrupt is disabled or on
694  * invalid parameters
695  */
696 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
697 			unsigned type)
698 {
699 	if (!adev->irq.installed)
700 		return false;
701 
702 	if (type >= src->num_types)
703 		return false;
704 
705 	if (!src->enabled_types || !src->funcs->set)
706 		return false;
707 
708 	return !!atomic_read(&src->enabled_types[type]);
709 }
710 
#ifdef __linux__
/* XXX: Generic IRQ handling */
/* Mask callback for the amdgpu irq_chip; intentionally a no-op. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

/* Unmask callback for the amdgpu irq_chip; intentionally a no-op. */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
#endif
730 
#ifdef __linux__
/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware irq number
 *
 * Current implementation assigns simple interrupt handler to the given virtual
 * IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	/* hwirq must be a valid IH source id. */
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
#endif
761 
762 /**
763  * amdgpu_irq_add_domain - create a linear IRQ domain
764  *
765  * @adev: amdgpu device pointer
766  *
767  * Creates an IRQ domain for GPU interrupt sources
768  * that may be driven by another driver (e.g., ACP).
769  *
770  * Returns:
771  * 0 on success or error code otherwise
772  */
773 int amdgpu_irq_add_domain(struct amdgpu_device *adev)
774 {
775 #ifdef __linux__
776 	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
777 						 &amdgpu_hw_irqdomain_ops, adev);
778 	if (!adev->irq.domain) {
779 		DRM_ERROR("GPU irq add domain failed\n");
780 		return -ENODEV;
781 	}
782 #endif
783 
784 	return 0;
785 }
786 
787 /**
788  * amdgpu_irq_remove_domain - remove the IRQ domain
789  *
790  * @adev: amdgpu device pointer
791  *
792  * Removes the IRQ domain for GPU interrupt sources
793  * that may be driven by another driver (e.g., ACP).
794  */
795 void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
796 {
797 	STUB();
798 #if 0
799 	if (adev->irq.domain) {
800 		irq_domain_remove(adev->irq.domain);
801 		adev->irq.domain = NULL;
802 	}
803 #endif
804 }
805 
806 /**
807  * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
808  *
809  * @adev: amdgpu device pointer
810  * @src_id: IH source id
811  *
812  * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
813  * Use this for components that generate a GPU interrupt, but are driven
814  * by a different driver (e.g., ACP).
815  *
816  * Returns:
817  * Linux IRQ
818  */
819 unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
820 {
821 	STUB();
822 	return 0;
823 #if 0
824 	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
825 
826 	return adev->irq.virq[src_id];
827 #endif
828 }
829