xref: /netbsd-src/sys/arch/arm/rockchip/rk_drm.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
/* $NetBSD: rk_drm.c,v 1.6 2021/04/28 04:51:41 mrg Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rk_drm.c,v 1.6 2021/04/28 04:51:41 mrg Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_device.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_port.h>

#include <arm/rockchip/rk_drm.h>

#define	RK_DRM_MAX_WIDTH	3840
#define	RK_DRM_MAX_HEIGHT	2160

static TAILQ_HEAD(, rk_drm_ports) rk_drm_ports =
    TAILQ_HEAD_INITIALIZER(rk_drm_ports);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "rockchip,display-subsystem" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"simple-framebuffer",
	NULL
};

static int	rk_drm_match(device_t, cfdata_t, void *);
static void	rk_drm_attach(device_t, device_t, void *);

static void	rk_drm_init(device_t);
static vmem_t	*rk_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	rk_drm_set_busid(struct drm_device *, struct drm_master *);

static uint32_t	rk_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	rk_drm_enable_vblank(struct drm_device *, unsigned int);
static void	rk_drm_disable_vblank(struct drm_device *, unsigned int);

static int	rk_drm_load(struct drm_device *, unsigned long);
static int	rk_drm_unload(struct drm_device *);

static struct drm_driver rk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.dev_priv_size = 0,
	.load = rk_drm_load,
	.unload = rk_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = rk_drm_get_vblank_counter,
	.enable_vblank = rk_drm_enable_vblank,
	.disable_vblank = rk_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.set_busid = rk_drm_set_busid,
};

CFATTACH_DECL_NEW(rk_drm, sizeof(struct rk_drm_softc),
	rk_drm_match, rk_drm_attach, NULL, NULL);

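/*
 * Match the FDT "rockchip,display-subsystem" node, which ties the display
 * controller ports together into a single DRM device.
 */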
static int
rk_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

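/*
 * Attach: allocate the drm_device, wire up bus space and DMA tags, drop
 * any firmware-provided "simple-framebuffer" node, and defer the rest of
 * the initialization with config_defer().
 */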
static void
rk_drm_attach(device_t parent, device_t self, void *aux)
{
	struct rk_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &rk_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": (disabled)\n");
		return;
	}

	aprint_normal("\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (sc->sc_ddev == NULL) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, rk_drm_init);
}

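/*
 * Deferred initialization: register the DRM device with the DRM core and
 * print the driver/version banner on success.
 */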
static void
rk_drm_init(device_t dev)
{
	struct rk_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &rk_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		drm_dev_unref(sc->sc_ddev);
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

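/*
 * Allocate a physically contiguous chunk of DRAM and wrap it in a vmem
 * arena; the GEM CMA helpers later carve framebuffer memory out of this
 * pool (ddev->cma_pool).
 */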
static vmem_t *
rk_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("rkdrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

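/*
 * Provide the DRM bus ID ("platform:rk:<unit>") for this device instance.
 */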
static int
rk_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	char id[32];

	snprintf(id, sizeof(id), "platform:rk:%u", device_unit(sc->sc_dev));

	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;
	strcpy(master->unique, id);
	master->unique_len = strlen(master->unique);

	return 0;
}

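/*
 * Framebuffer object hooks: export the backing GEM object as a handle to
 * userland, and drop the GEM reference when the framebuffer is destroyed.
 */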
static int
rk_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
rk_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs rk_drm_framebuffer_funcs = {
	.create_handle = rk_drm_fb_create_handle,
	.destroy = rk_drm_fb_destroy,
};

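/*
 * Mode configuration "fb_create" hook: wrap a userland-supplied GEM
 * object in a rk_drm_framebuffer. Only cmd->handles[0] is used and no
 * flags are supported.
 */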
static struct drm_framebuffer *
rk_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    struct drm_mode_fb_cmd2 *cmd)
{
	struct rk_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(ddev, file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.pixel_format = cmd->pixel_format;
	fb->base.bits_per_pixel = drm_format_plane_cpp(fb->base.pixel_format, 0) * 8;

	switch (fb->base.pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb->base.depth = 32;
		break;
	default:
		break;
	}

	error = drm_framebuffer_init(ddev, &fb->base, &rk_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_unreference_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs rk_drm_mode_config_funcs = {
	.fb_create = rk_drm_fb_create,
};

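/*
 * drm_fb_helper "fb_probe" callback: reserve the CMA pool (console
 * framebuffer plus one RK_DRM_MAX_WIDTH x RK_DRM_MAX_HEIGHT 32-bit plane,
 * rounded up to 1 MB), allocate the GEM object backing the framebuffer
 * console, and attach the fbdev child device on the "rkfbbus" interface
 * attribute.
 */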
static int
rk_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct rk_drm_softc * const sc = rk_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct rk_drm_framebuffer *sfb = to_rk_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct rk_drmfb_attach_args sfa;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	/* Reserve enough memory for the FB console plus a 4K plane, rounded to 1MB */
	cma_size = size;
	cma_size += (RK_DRM_MAX_WIDTH * RK_DRM_MAX_HEIGHT * 4);
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = rk_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL)
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA\n",
		    (u_int)(cma_size / (1024 * 1024)));

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
#ifdef __ARM_BIG_ENDIAN
	fb->pixel_format = DRM_FORMAT_BGRX8888;
#else
	fb->pixel_format = DRM_FORMAT_XRGB8888;
#endif
	drm_fb_get_bpp_depth(fb->pixel_format, &fb->depth, &fb->bits_per_pixel);

	error = drm_framebuffer_init(ddev, fb, &rk_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARG_IATTR, "rkfbbus",
	    CFARG_EOL);
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs rk_drm_fb_helper_funcs = {
	.fb_probe = rk_drm_fb_probe,
};

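/*
 * DRM driver "load" callback: initialize the mode configuration, bind the
 * display controller ports listed in the "ports" property to this device,
 * activate their endpoints, and set up the fbdev emulation helper.
 *
 * The device tree node is expected to look roughly like this
 * (illustrative only):
 *
 *	display-subsystem {
 *		compatible = "rockchip,display-subsystem";
 *		ports = <&vopl_out>, <&vopb_out>;
 *	};
 */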
static int
rk_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);
	struct rk_drm_ports *sport;
	struct rk_drm_fbdev *fbdev;
	struct fdt_endpoint *ep;
	const u_int *data;
	int datalen, error, num_crtc, ep_index;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = RK_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = RK_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &rk_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "ports", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sport, &rk_drm_ports, entries)
			if (sport->phandle == crtc_phandle && sport->ddev == NULL) {
				sport->ddev = ddev;
				for (ep_index = 0; (ep = fdt_endpoint_get_from_index(sport->port, 0, ep_index)) != NULL; ep_index++) {
					error = fdt_endpoint_activate_direct(ep, true);
					if (error != 0)
						aprint_debug_dev(sc->sc_dev,
						    "failed to activate endpoint %d: %d\n",
						    ep_index, error);
				}
				num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no display interface ports configured\n");
		error = -ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &rk_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct rk_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

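/*
 * Vblank hooks. These are thin dispatchers into the per-CRTC callbacks
 * stored in sc_vbl[]; CRTC indices without registered callbacks are
 * silently ignored.
 */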
static uint32_t
rk_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
rk_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
rk_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct rk_drm_softc * const sc = rk_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

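/*
 * DRM driver "unload" callback: tear down the mode configuration.
 */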
static int
rk_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);

	return 0;
}

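/*
 * Register a display controller port. rk_drm_load() later matches
 * registered ports against the "ports" property and binds them to the
 * drm_device.
 */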
int
rk_drm_register_port(int phandle, struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	sport = kmem_zalloc(sizeof(*sport), KM_SLEEP);
	sport->phandle = phandle;
	sport->port = port;
	sport->ddev = NULL;
	TAILQ_INSERT_TAIL(&rk_drm_ports, sport, entries);

	return 0;
}

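/*
 * Return the drm_device a registered port has been bound to, or NULL if
 * the port is unknown or not yet bound.
 */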
struct drm_device *
rk_drm_port_device(struct fdt_device_ports *port)
{
	struct rk_drm_ports *sport;

	TAILQ_FOREACH(sport, &rk_drm_ports, entries)
		if (sport->port == port)
			return sport->ddev;

	return NULL;
}