/*	$NetBSD: amdgpu_irq.c,v 1.9 2021/12/19 12:38:49 riastradh Exp $	*/

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by the module parameter, then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */

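/*
 * Illustrative sketch (not code from this driver) of the deferral pattern
 * described above: the hard IRQ handler only acknowledges the event and
 * punts anything that may sleep to a work handler.  All "example_" names
 * are hypothetical.
 *
 *	struct example_dev {
 *		struct work_struct slow_work;
 *	};
 *
 *	static void example_slow_work(struct work_struct *work)
 *	{
 *		struct example_dev *edev =
 *		    container_of(work, struct example_dev, slow_work);
 *
 *		// Process context: taking mutexes and sleeping is allowed.
 *		(void)edev;
 *	}
 *
 *	static irqreturn_t example_irq(int irq, void *arg)
 *	{
 *		struct example_dev *edev = arg;
 *
 *		// Atomic context: no sleeping, so defer to the workqueue.
 *		schedule_work(&edev->slow_work);
 *		return IRQ_HANDLED;
 *	}
 */
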
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_irq.c,v 1.9 2021/12/19 12:38:49 riastradh Exp $");

#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200

/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work);
	struct drm_device *dev = adev->ddev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

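/*
 * For reference, the pre-DC display IP code schedules this work from its
 * HPD interrupt processing; from memory of the DCE sources rather than
 * this file, that is roughly:
 *
 *	schedule_work(&adev->hotplug_work);
 */
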
/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
	unsigned long irqflags;
	unsigned i, j, k;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);
	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src || !src->funcs->set || !src->num_types)
				continue;

			for (k = 0; k < src->num_types; ++k) {
				atomic_set(&src->enabled_types[k], 0);
				r = src->funcs->set(adev, src, k,
						    AMDGPU_IRQ_STATE_DISABLE);
				if (r)
					DRM_ERROR("error disabling interrupt (%d)\n",
						  r);
			}
		}
	}
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}

/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct amdgpu_device *adev = dev->dev_private;
	irqreturn_t ret;

	ret = amdgpu_ih_process(adev, &adev->irq.ih);
	if (ret == IRQ_HANDLED)
		pm_runtime_mark_last_busy(dev->dev);

	/* For hardware that cannot enable the bif ring for both the
	 * ras_controller_irq and ras_err_event_athub_irq ih cookies, the
	 * driver has to poll a status register to check whether the
	 * interrupt was triggered, and properly ack the interrupt if it
	 * is there.
	 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
		if (adev->nbio.funcs &&
		    adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
			adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);

		if (adev->nbio.funcs &&
		    adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
			adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
	}

	return ret;
}

/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih1_work);

	amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  irq.ih2_work);

	amdgpu_ih_process(adev, &adev->irq.ih2);
}

/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
	if (amdgpu_msi == 1)
		return true;
	else if (amdgpu_msi == 0)
		return false;

	return true;
}

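/*
 * The amdgpu_msi variable tested above is a tri-state module parameter
 * (1 = force on, 0 = force off, anything else = auto).  It is declared in
 * the driver entry code (amdgpu_drv.c upstream); a sketch of that
 * declaration, from memory rather than from this tree:
 *
 *	int amdgpu_msi = -1;
 *	MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
 *	module_param_named(msi, amdgpu_msi, int, 0444);
 */
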
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
#ifdef __NetBSD__		/* XXX amdgpu msix */
		if (pci_enable_msi(adev->pdev) == 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
		} else {
			dev_err(adev->dev, "amdgpu: failed to enable MSI\n");
		}
#else
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}
		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
		}
#endif
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev->ddev->vblank_disable_immediate = true;

		r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work,
				amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);

	adev->irq.installed = true;
#ifdef __NetBSD__	/* XXX post-merge address comment below */
	r = drm_irq_install(adev->ddev);
#else
	/* Use vector 0 for MSI-X */
	r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
#endif
	if (r) {
		adev->irq.installed = false;
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->ddev->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}

/**
 * amdgpu_irq_fini - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down work functions for hotplug and reset interrupts, disables MSI
 * functionality, shuts down vblank, hotplug and reset interrupt handling,
 * turns off interrupts from all sources (all ASICs).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	if (adev->irq.installed) {
		drm_irq_uninstall(adev->ddev);
		adev->irq.installed = false;
#ifndef __NetBSD__		/* XXX amdgpu msix */
		if (adev->irq.msi_enabled)
			pci_free_irq_vectors(adev->pdev);
#endif
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
	}

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;

			kfree(src->enabled_types);
			src->enabled_types = NULL;
			if (src->data) {
				kfree(src->data);
				kfree(src);
				adev->irq.client[i].sources[j] = NULL;
			}
		}
		kfree(adev->irq.client[i].sources);
		adev->irq.client[i].sources = NULL;
	}

	spin_lock_destroy(&adev->irq.lock);
}

/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
		      unsigned client_id, unsigned src_id,
		      struct amdgpu_irq_src *source)
{
	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
		return -EINVAL;

	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EINVAL;

	if (!source->funcs)
		return -EINVAL;

	if (!adev->irq.client[client_id].sources) {
		adev->irq.client[client_id].sources =
			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
				sizeof(struct amdgpu_irq_src *),
				GFP_KERNEL);
		if (!adev->irq.client[client_id].sources)
			return -ENOMEM;
	}

	if (adev->irq.client[client_id].sources[src_id] != NULL)
		return -EINVAL;

	if (source->num_types && !source->enabled_types) {
		atomic_t *types;

		types = kcalloc(source->num_types, sizeof(atomic_t),
				GFP_KERNEL);
		if (!types)
			return -ENOMEM;

		source->enabled_types = types;
	}

	adev->irq.client[client_id].sources[src_id] = source;
	return 0;
}

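/*
 * A typical caller is an IP block's sw_init hook, which fills in an
 * &amdgpu_irq_src embedded in the device structure and registers it.  A
 * hedged sketch of that pattern; the "example_" names and the source id
 * are hypothetical, not from this driver:
 *
 *	static const struct amdgpu_irq_src_funcs example_irq_funcs = {
 *		.set = example_set_irq_state,
 *		.process = example_process_irq,
 *	};
 *
 *	adev->example_irq.num_types = 1;
 *	adev->example_irq.funcs = &example_irq_funcs;
 *	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
 *			      EXAMPLE_SRC_ID, &adev->example_irq);
 *	if (r)
 *		return r;
 */
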
/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.iv_entry = (const uint32_t *)__UNVOLATILE(&ih->ring[ring_index]);
	amdgpu_ih_decode_iv(adev, &entry);

	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

#ifndef __NetBSD__		/* XXX amdgpu irq */
	} else if (adev->irq.virq[src_id]) {
		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
#endif

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}

/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src, unsigned type)
{
	unsigned long irqflags;
	enum amdgpu_interrupt_state state;
	int r;

	spin_lock_irqsave(&adev->irq.lock, irqflags);

	/* We need to determine the state after taking the lock; otherwise
	   we might disable an interrupt that has just been enabled. */
	if (amdgpu_irq_enabled(adev, src, type))
		state = AMDGPU_IRQ_STATE_ENABLE;
	else
		state = AMDGPU_IRQ_STATE_DISABLE;

	r = src->funcs->set(adev, src, type, state);
	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
	return r;
}

/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
	int i, j, k;

	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
		if (!adev->irq.client[i].sources)
			continue;

		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

			if (!src)
				continue;
			for (k = 0; k < src->num_types; k++)
				amdgpu_irq_update(adev, src, k);
		}
	}
}

/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_inc_return(&src->enabled_types[type]) == 1)
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
		   unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return -ENOENT;

	if (type >= src->num_types)
		return -EINVAL;

	if (!src->enabled_types || !src->funcs->set)
		return -EINVAL;

	if (atomic_dec_and_test(&src->enabled_types[type]))
		return amdgpu_irq_update(adev, src, type);

	return 0;
}

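/*
 * amdgpu_irq_get()/amdgpu_irq_put() are reference counted: the hardware
 * state is only touched on the 0 -> 1 and 1 -> 0 transitions of
 * &amdgpu_irq_src.enabled_types[type], so callers simply pair them.  A
 * hedged sketch with a hypothetical source and type:
 *
 *	r = amdgpu_irq_get(adev, &adev->example_irq, EXAMPLE_IRQ_TYPE);
 *	if (r)
 *		return r;
 *	// ... interrupt delivery needed while the work is in flight ...
 *	amdgpu_irq_put(adev, &adev->example_irq, EXAMPLE_IRQ_TYPE);
 */
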
/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
			unsigned type)
{
	if (!adev->ddev->irq_enabled)
		return false;

	if (type >= src->num_types)
		return false;

	if (!src->enabled_types || !src->funcs->set)
		return false;

	return !!atomic_read(&src->enabled_types[type]);
}

#ifndef __NetBSD__		/* XXX amdgpu irq domain */

/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * The current implementation assigns a simple interrupt handler to the given
 * virtual IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
				unsigned int irq, irq_hw_number_t hwirq)
{
	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
		return -EPERM;

	irq_set_chip_and_handler(irq,
				 &amdgpu_irq_chip, handle_simple_irq);
	return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};

#endif	/* __NetBSD__ */

/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
#ifndef __NetBSD__		/* XXX amdgpu irq domain */
	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
						 &amdgpu_hw_irqdomain_ops, adev);
	if (!adev->irq.domain) {
		DRM_ERROR("GPU irq add domain failed\n");
		return -ENODEV;
	}
#endif

	return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
#ifndef __NetBSD__		/* XXX amdgpu irq domain */
	if (adev->irq.domain) {
		irq_domain_remove(adev->irq.domain);
		adev->irq.domain = NULL;
	}
#endif
}

/**
 * amdgpu_irq_create_mapping - map a GPU IH src id to a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates a mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
#ifdef __NetBSD__		/* XXX amdgpu irq domain */
	return 0;
#else
	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

	return adev->irq.virq[src_id];
#endif
}
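
/*
 * The returned Linux IRQ is delivered via generic_handle_irq() from
 * amdgpu_irq_dispatch() above, so a consumer driver can claim it with the
 * ordinary request_irq() machinery.  A hedged sketch, with hypothetical
 * names for the handler and source id:
 *
 *	unsigned int virq = amdgpu_irq_create_mapping(adev, EXAMPLE_SRC_ID);
 *
 *	if (virq)
 *		r = request_irq(virq, example_handler, 0, "example", data);
 */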