/* $NetBSD: tegra_drm.c,v 1.16 2022/04/21 21:22:25 andvar Exp $ */

/*-
 * Copyright (c) 2015 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tegra_drm.c,v 1.16 2022/04/21 21:22:25 andvar Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_device.h>

#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>

#include <arm/nvidia/tegra_reg.h>
#include <arm/nvidia/tegra_var.h>
#include <arm/nvidia/tegra_drm.h>

#include <dev/fdt/fdtvar.h>

static int	tegra_drm_match(device_t, cfdata_t, void *);
static void	tegra_drm_attach(device_t, device_t, void *);

static int	tegra_drm_load(struct drm_device *, unsigned long);
static void	tegra_drm_unload(struct drm_device *);

static void	tegra_drm_task_work(struct work *, void *);

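/*
 * DRM driver hooks.  Only mode setting and GEM are advertised; GEM
 * objects are backed by the CMA helpers and mapped through UVM.
 */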
static struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,

	.get_vblank_counter = tegra_drm_get_vblank_counter,
	.enable_vblank = tegra_drm_enable_vblank,
	.disable_vblank = tegra_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(tegra_drm, sizeof(struct tegra_drm_softc),
	tegra_drm_match, tegra_drm_attach, NULL, NULL);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "nvidia,tegra124-host1x" },
	DEVICE_COMPAT_EOL
};

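/*
 * The DRM device attaches to the Tegra124 host1x node; the display
 * controllers and the HDMI output are found among its children.
 */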
static int
tegra_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

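/* host1x child nodes from which attach collects clocks, resets and friends. */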
static const struct device_compatible_entry hdmi_compat[] = {
	{ .compat = "nvidia,tegra124-hdmi" },
	DEVICE_COMPAT_EOL
};

static const struct device_compatible_entry dc_compat[] = {
	{ .compat = "nvidia,tegra124-dc" },
	DEVICE_COMPAT_EOL
};

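/*
 * Gather clocks, resets, the DDC i2c bus, the HPD GPIO and the HDMI
 * supply regulators from the host1x children, bring host1x out of
 * reset with its clock enabled, then allocate and register the DRM
 * device.
 */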
static void
tegra_drm_attach(device_t parent, device_t self, void *aux)
{
	struct tegra_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &tegra_drm_driver;
	prop_dictionary_t prop = device_properties(self);
	int error, node, hdmi_phandle, ddc_phandle;
	static const char * const hdmi_supplies[] = {
		"hdmi-supply", "pll-supply", "vdd-supply"
	};
	struct fdtbus_regulator *reg;
	u_int n, ndc;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "tegradrm",
	    &tegra_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_clk_host1x = fdtbus_clock_get_index(faa->faa_phandle, 0);
	if (sc->sc_clk_host1x == NULL) {
		aprint_error_dev(self, "couldn't get clock host1x\n");
		return;
	}
	sc->sc_rst_host1x = fdtbus_reset_get(faa->faa_phandle, "host1x");
	if (sc->sc_clk_host1x == NULL || sc->sc_rst_host1x == NULL) {
		aprint_error_dev(self, "couldn't get reset host1x\n");
		return;
	}

	ndc = 0;
	hdmi_phandle = -1;
	for (node = OF_child(faa->faa_phandle); node; node = OF_peer(node)) {
		if (of_compatible_match(node, hdmi_compat)) {
			sc->sc_clk_hdmi = fdtbus_clock_get(node, "hdmi");
			sc->sc_clk_hdmi_parent = fdtbus_clock_get(node,
			    "parent");
			sc->sc_rst_hdmi = fdtbus_reset_get(node, "hdmi");
			hdmi_phandle = node;
		} else if (of_compatible_match(node, dc_compat) &&
			   ndc < __arraycount(sc->sc_clk_dc)) {
			sc->sc_clk_dc[ndc] = fdtbus_clock_get(node, "dc");
			sc->sc_clk_dc_parent[ndc] = fdtbus_clock_get(node,
			    "parent");
			sc->sc_rst_dc[ndc] = fdtbus_reset_get(node, "dc");
			++ndc;
		}
	}
	if (hdmi_phandle >= 0) {
		ddc_phandle = fdtbus_get_phandle(hdmi_phandle,
		    "nvidia,ddc-i2c-bus");
		if (ddc_phandle >= 0) {
			sc->sc_ddc = fdtbus_i2c_get_tag(ddc_phandle);
		}

		sc->sc_pin_hpd = fdtbus_gpio_acquire(hdmi_phandle,
		    "nvidia,hpd-gpio", GPIO_PIN_INPUT);

		for (n = 0; n < __arraycount(hdmi_supplies); n++) {
			const char *supply = hdmi_supplies[n];
			reg = fdtbus_regulator_acquire(hdmi_phandle, supply);
			if (reg == NULL) {
				aprint_error_dev(self, "couldn't acquire %s\n",
				    supply);
				continue;
			}
			if (fdtbus_regulator_enable(reg) != 0) {
				aprint_error_dev(self, "couldn't enable %s\n",
				    supply);
			}
			fdtbus_regulator_release(reg);
		}
	}

	fdtbus_reset_assert(sc->sc_rst_host1x);
	error = clk_enable(sc->sc_clk_host1x);
	if (error) {
		aprint_error_dev(self, "couldn't enable clock host1x: %d\n",
		    error);
		return;
	}
	fdtbus_reset_deassert(sc->sc_rst_host1x);

	prop_dictionary_get_bool(prop, "force-dvi", &sc->sc_force_dvi);

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		drm_dev_put(sc->sc_ddev);
		sc->sc_ddev = NULL;
		aprint_error_dev(self, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(self, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct tegra_drm_task *const task =
		    SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, tdt_u.queue);
		(*task->tdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

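/* DRM load hook: initialize mode setting and the framebuffer. */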
static int
tegra_drm_load(struct drm_device *ddev, unsigned long flags)
{
	int error;

	error = tegra_drm_mode_init(ddev);
	if (error)
		goto drmerr;

	error = tegra_drm_fb_init(ddev);
	if (error)
		goto drmerr;

	return 0;

drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

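/* DRM unload hook: tear down the mode configuration. */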
static void
tegra_drm_unload(struct drm_device *ddev)
{

	drm_mode_config_cleanup(ddev);
}

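/* Workqueue callback: run one deferred task. */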
static void
tegra_drm_task_work(struct work *work, void *cookie)
{
	struct tegra_drm_task *task = container_of(work, struct tegra_drm_task,
	    tdt_u.work);

	(*task->tdt_fn)(task);
}

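/*
 * Task interface used by the display code.  Tasks scheduled from the
 * lwp that is still running tegra_drm_attach() are queued and run at
 * the end of attach; anything scheduled later is handed to the
 * workqueue.  A sketch of the intended usage, with a hypothetical
 * caller-defined handler my_task_fn():
 *
 *	tegra_task_init(&task, my_task_fn);
 *	tegra_task_schedule(self, &task);
 */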
void
tegra_task_init(struct tegra_drm_task *task,
    void (*fn)(struct tegra_drm_task *))
{

	task->tdt_fn = fn;
}

void
tegra_task_schedule(device_t self, struct tegra_drm_task *task)
{
	struct tegra_drm_softc *sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, tdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->tdt_u.work, NULL);
}