/*	$NetBSD: amdgpu_pci.c,v 1.12 2023/08/07 16:34:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pci.c,v 1.12 2023/08/07 16:34:47 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/pci/pcivar.h>

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pci.h>

#include <amdgpu.h>
#include "amdgpu_drv.h"
#include "amdgpu_task.h"

struct drm_device;

SIMPLEQ_HEAD(amdgpu_task_head, amdgpu_task);

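/*
 * Per-instance driver state.  sc_task_thread and sc_tasks implement a
 * two-phase task scheme: tasks queued by the thread running attach are
 * put on the sc_tasks list and processed at the end of attach, while
 * tasks queued by anyone else go to the workqueue sc_task_wq.  The
 * sc_pci_attached and sc_dev_registered flags record how far attach
 * got, so detach tears down only what was set up.
 */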
struct amdgpu_softc {
	device_t			sc_dev;
	struct pci_attach_args		sc_pa;
	struct lwp			*sc_task_thread;
	struct amdgpu_task_head		sc_tasks;
	struct workqueue		*sc_task_wq;
	struct drm_device		*sc_drm_dev;
	struct pci_dev			sc_pci_dev;
	bool				sc_pci_attached;
	bool				sc_dev_registered;
};

static bool	amdgpu_pci_lookup(const struct pci_attach_args *,
		    unsigned long *);

static int	amdgpu_match(device_t, cfdata_t, void *);
static void	amdgpu_attach(device_t, device_t, void *);
static void	amdgpu_attach_real(device_t);
static int	amdgpu_detach(device_t, int);
static bool	amdgpu_do_suspend(device_t, const pmf_qual_t *);
static bool	amdgpu_do_resume(device_t, const pmf_qual_t *);

static void	amdgpu_task_work(struct work *, void *);

CFATTACH_DECL_NEW(amdgpu, sizeof(struct amdgpu_softc),
    amdgpu_match, amdgpu_attach, amdgpu_detach, NULL);

/* XXX Kludge to get these from amdgpu_drv.c.  */
extern struct drm_driver *const amdgpu_drm_driver;
extern const struct pci_device_id *const amdgpu_device_ids;
extern const size_t amdgpu_n_device_ids;

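/*
 * Scan the PCI device id table from amdgpu_drv.c for an entry matching
 * pa's vendor and product.  Returns true on a match; if flags is
 * nonnull, also fills it in with the matching entry's driver_data.
 */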
static bool
amdgpu_pci_lookup(const struct pci_attach_args *pa, unsigned long *flags)
{
	size_t i;

	for (i = 0; i < amdgpu_n_device_ids; i++) {
		if ((PCI_VENDOR(pa->pa_id) == amdgpu_device_ids[i].vendor) &&
		    (PCI_PRODUCT(pa->pa_id) == amdgpu_device_ids[i].device))
			break;
	}

	/* Did we find it?  */
	if (i == amdgpu_n_device_ids)
		return false;

	if (flags)
		*flags = amdgpu_device_ids[i].driver_data;
	return true;
}

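/*
 * Autoconf match: make sure the Linux-side driver state is initialized,
 * then check whether this PCI device appears in the id table.
 */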
static int
amdgpu_match(device_t parent, cfdata_t match, void *aux)
{
	extern int amdgpu_guarantee_initialized(void);
	const struct pci_attach_args *const pa = aux;
	int error;

	error = amdgpu_guarantee_initialized();
	if (error) {
		aprint_error("amdgpu: failed to initialize: %d\n", error);
		return 0;
	}

	if (!amdgpu_pci_lookup(pa, NULL))
		return 0;

	return 7;		/* beat genfb_pci and radeon */
}

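/*
 * Autoconf attach: record the attach arguments, create the task
 * workqueue, and defer the bulk of initialization to mountroot time.
 */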
static void
amdgpu_attach(device_t parent, device_t self, void *aux)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = aux;
	int error;

	pci_aprint_devinfo(pa, NULL);

	/* Initialize the Linux PCI device descriptor.  */
	linux_pci_dev_init(&sc->sc_pci_dev, self, device_parent(self), pa, 0);

	sc->sc_dev = self;
	sc->sc_pa = *pa;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	error = workqueue_create(&sc->sc_task_wq, "amdgpufb",
	    &amdgpu_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE);
	if (error) {
		aprint_error_dev(self, "unable to create workqueue: %d\n",
		    error);
		sc->sc_task_wq = NULL;
		return;
	}

	/*
	 * Defer the remainder of initialization until we have mounted
	 * the root file system and can load firmware images.
	 */
	config_mountroot(self, &amdgpu_attach_real);
}

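/*
 * Deferred attach, run once the root file system is mounted and
 * firmware images can be loaded: create and register the drm device,
 * establish the power handler, and run any tasks queued during attach.
 */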
static void
amdgpu_attach_real(device_t self)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = &sc->sc_pa;
	bool ok __diagused;
	unsigned long flags = 0;	/* XXXGCC */
	int error;

	ok = amdgpu_pci_lookup(pa, &flags);
	KASSERT(ok);

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	sc->sc_drm_dev = drm_dev_alloc(amdgpu_drm_driver, self);
	if (IS_ERR(sc->sc_drm_dev)) {
		aprint_error_dev(self, "unable to create drm device: %ld\n",
		    PTR_ERR(sc->sc_drm_dev));
		sc->sc_drm_dev = NULL;
		goto out;
	}

	/* XXX errno Linux->NetBSD */
	error = -drm_pci_attach(sc->sc_drm_dev, &sc->sc_pci_dev);
	if (error) {
		aprint_error_dev(self, "unable to attach drm: %d\n", error);
		goto out;
	}
	sc->sc_pci_attached = true;

	/* XXX errno Linux->NetBSD */
	error = -drm_dev_register(sc->sc_drm_dev, flags);
	if (error) {
		aprint_error_dev(self, "unable to register drm: %d\n", error);
		goto out;
	}
	sc->sc_dev_registered = true;

	if (!pmf_device_register(self, &amdgpu_do_suspend, &amdgpu_do_resume))
		aprint_error_dev(self, "unable to establish power handler\n");

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct amdgpu_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rt_u.queue);
		(*task->rt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

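/*
 * Autoconf detach: tear down, in reverse order, only what attach
 * managed to set up, as recorded in the softc flags.
 */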
static int
amdgpu_detach(device_t self, int flags)
{
	struct amdgpu_softc *const sc = device_private(self);
	int error;

	/* XXX Check for in-use before tearing it all down...  */
	error = config_detach_children(self, flags);
	if (error)
		return error;

	KASSERT(sc->sc_task_thread == NULL);
	KASSERT(SIMPLEQ_EMPTY(&sc->sc_tasks));

	pmf_device_deregister(self);
	if (sc->sc_dev_registered)
		drm_dev_unregister(sc->sc_drm_dev);
	if (sc->sc_pci_attached)
		drm_pci_detach(sc->sc_drm_dev);
	if (sc->sc_drm_dev) {
		drm_dev_put(sc->sc_drm_dev);
		sc->sc_drm_dev = NULL;
	}
	if (sc->sc_task_wq) {
		workqueue_destroy(sc->sc_task_wq);
		sc->sc_task_wq = NULL;
	}
	linux_pci_dev_destroy(&sc->sc_pci_dev);

	return 0;
}

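/*
 * pmf suspend handler: block new ioctls, then suspend the device.
 */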
static bool
amdgpu_do_suspend(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	drm_suspend_ioctl(dev);

	ret = amdgpu_device_suspend(dev, /*fbcon*/true);
	if (ret)
		return false;

	return true;
}

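/*
 * pmf resume handler: resume the device, then unblock ioctls whether
 * or not the resume succeeded.
 */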
static bool
amdgpu_do_resume(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	ret = amdgpu_device_resume(dev, /*fbcon*/true);
	if (ret)
		goto out;

out:	drm_resume_ioctl(dev);
	return ret == 0;
}

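/*
 * Workqueue handler: run one task that amdgpu_task_schedule handed to
 * the workqueue after attach had completed.
 */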
static void
amdgpu_task_work(struct work *work, void *cookie __unused)
{
	struct amdgpu_task *const task = container_of(work, struct amdgpu_task,
	    rt_u.work);

	(*task->rt_fn)(task);
}

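/*
 * Schedule a task with its rt_fn callback already initialized.  If we
 * are the thread running attach, queue it on the list processed at the
 * end of attach; otherwise hand it to the workqueue, where it will run
 * via amdgpu_task_work.
 */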
void
amdgpu_task_schedule(device_t self, struct amdgpu_task *task)
{
	struct amdgpu_softc *const sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->rt_u.work, NULL);
}