/* $NetBSD: sunxi_drm.c,v 1.14 2021/04/24 23:36:28 thorpej Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.14 2021/04/24 23:36:28 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_device.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_port.h>

#include <arm/sunxi/sunxi_drm.h>

#define	SUNXI_DRM_MAX_WIDTH	3840
#define	SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break trunc_page/round_page macros with a redefinition
 * of PAGE_MASK. Use our own macros instead.
 */
#define	SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define	SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define	SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)

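/*
 * Endpoints (CRTCs and encoders) register themselves on this list via
 * sunxi_drm_register_endpoint() before the DRM device is loaded; the list
 * is walked in sunxi_drm_load() to activate the pipelines named in the
 * "allwinner,pipelines" property.
 */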
static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-h3-display-engine" },
	{ .compat = "allwinner,sun50i-a64-display-engine" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	sunxi_drm_set_busid(struct drm_device *, struct drm_master *);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static int	sunxi_drm_unload(struct drm_device *);

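/*
 * Mode-setting driver description. Buffers are GEM objects backed by the
 * CMA pool; the vblank hooks simply dispatch to the per-CRTC callbacks
 * registered in sc_vbl by the CRTC drivers.
 */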
static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.set_busid = sunxi_drm_set_busid,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

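/*
 * Match the top-level display-engine pipeline node against compat_data.
 */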
static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

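/*
 * Attach: record the FDT resources, allocate the DRM device, remove any
 * "allwinner,simple-framebuffer" node handed over by the bootloader (its
 * memory is reclaimed later in sunxi_drm_fb_probe()), and defer DRM
 * registration to sunxi_drm_init().
 */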
static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (sc->sc_ddev == NULL) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, sunxi_drm_init);
}

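/*
 * Deferred attach hook: register the DRM device with the core, which in
 * turn calls back into sunxi_drm_load() to build the mode-setting state.
 */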
static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		drm_dev_unref(sc->sc_ddev);
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

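/*
 * Allocate a physically contiguous chunk of DRAM and wrap it in a vmem
 * arena; GEM CMA objects (framebuffers, dumb buffers) are subsequently
 * carved out of this arena via ddev->cma_pool.
 */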
static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

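/*
 * Provide a stable bus ID of the form "platform:sunxi:<unit>" for the
 * DRM master.
 */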
static int
sunxi_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	char id[32];

	snprintf(id, sizeof(id), "platform:sunxi:%u", device_unit(sc->sc_dev));

	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;
	strcpy(master->unique, id);
	master->unique_len = strlen(master->unique);

	return 0;
}

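/*
 * Framebuffer object hooks: create_handle exports a GEM handle for the
 * backing CMA object, destroy tears the framebuffer down and drops the
 * GEM reference taken at creation time.
 */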
static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

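/*
 * mode_config .fb_create hook (ADDFB/ADDFB2): wrap a userland-supplied GEM
 * object in a sunxi_drm_framebuffer. Only single-object framebuffers are
 * supported; handles[0] backs every plane.
 */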
static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(ddev, file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.pixel_format = cmd->pixel_format;
	fb->base.bits_per_pixel = drm_format_plane_cpp(fb->base.pixel_format, 0) * 8;

	switch (fb->base.pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb->base.depth = 32;
		break;
	default:
		break;
	}

	error = drm_framebuffer_init(ddev, &fb->base, &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_unreference_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

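/*
 * Search /chosen for a "simple-framebuffer" node set up by the bootloader
 * and return its page-aligned physical address range, so that the memory
 * can be added back to the CMA pool.
 */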
static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const struct device_compatible_entry simplefb_compat[] = {
		{ .compat = "simple-framebuffer" },
		DEVICE_COMPAT_EOL
	};
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_compatible_match(child, simplefb_compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}

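/*
 * fb_helper probe hook: size the CMA pool (reclaiming the bootloader's
 * simplefb memory when possible), allocate a CMA object for the console
 * framebuffer, and attach the framebuffer child device on the
 * "sunxifbbus" interface attribute.
 */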
static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->pixel_format = DRM_FORMAT_XRGB8888;
	drm_fb_get_bpp_depth(fb->pixel_format, &fb->depth, &fb->bits_per_pixel);

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARG_IATTR, "sunxifbbus",
	    CFARG_EOL);
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

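/*
 * Driver .load hook, called from drm_dev_register(). Walks the
 * "allwinner,pipelines" property, activates every endpoint registered for
 * those phandles, and then sets up fbdev emulation and vblank handling
 * for the discovered CRTCs.
 */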
static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

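/*
 * Vblank hooks: the CRTC drivers fill in sc_vbl[crtc] with their private
 * cookie and callbacks; these wrappers just validate the CRTC index and
 * forward the call.
 */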
static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);

	return 0;
}

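/*
 * Called by the pipeline endpoint drivers (mixer/CRTC and encoder drivers)
 * as they attach, before the display engine itself loads.  A caller sketch
 * (illustrative only, not taken from a specific driver):
 *
 *	// in the endpoint driver's attach path, once its fdt_endpoint
 *	// "ep" and device tree phandle are known
 *	sunxi_drm_register_endpoint(phandle, ep);
 *
 * sunxi_drm_load() later matches the phandle against the display engine's
 * "allwinner,pipelines" list and activates the endpoint.
 */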
int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

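/*
 * Map an endpoint back to the DRM device that claimed it in
 * sunxi_drm_load(); returns NULL if the endpoint has not been claimed.
 */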
struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}