/* $NetBSD: sunxi_drm.c,v 1.26 2022/09/25 07:50:23 riastradh Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.26 2022/09/25 07:50:23 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <uvm/uvm_device.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>

#include <dev/fdt/fdt_port.h>
#include <dev/fdt/fdtvar.h>

#include <arm/sunxi/sunxi_drm.h>

#include <drm/drm_auth.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#define	SUNXI_DRM_MAX_WIDTH	3840
#define	SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break trunc_page/round_page macros with a redefinition
 * of PAGE_MASK. Use our own macros instead.
 */
#define	SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define	SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define	SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)

static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "allwinner,sun8i-h3-display-engine" },
	{ .compat = "allwinner,sun8i-v3s-display-engine" },
	{ .compat = "allwinner,sun50i-a64-display-engine" },
	DEVICE_COMPAT_EOL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static void	sunxi_drm_unload(struct drm_device *);

static void	sunxi_drm_task_work(struct work *, void *);

static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) &&
	    is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

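	/*
	 * When the kernel is built with WSDISPLAY_MULTICONS, mark this
	 * device as a console candidate via the "is_console" property
	 * so the framebuffer attached later can register as an
	 * (additional) console.
	 */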
#ifdef WSDISPLAY_MULTICONS
	const bool is_console = true;
	prop_dictionary_set_bool(dict, "is_console", is_console);
#endif

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	if (workqueue_create(&sc->sc_task_wq, "sunxidrm",
		&sunxi_drm_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE)) {
		aprint_error_dev(self, "unable to create workqueue\n");
		sc->sc_task_wq = NULL;
		return;
	}

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (IS_ERR(sc->sc_ddev)) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, sunxi_drm_init);
}

static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		goto out;
	}
	sc->sc_dev_registered = true;

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct sunxi_drm_task *const task =
		    SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, sdt_u.queue);
		(*task->sdt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

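/*
 * Allocate a single physically contiguous DMA segment of cma_size bytes
 * and wrap it in a vmem arena.  The CMA GEM helpers later carve
 * framebuffer and dumb buffer backing store out of this pool.
 */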
static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_put_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

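/*
 * .fb_create hook: look up the GEM CMA object named by the handle in
 * the userland mode-setting request and wrap it in a
 * sunxi_drm_framebuffer.
 */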
static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    const struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	drm_helper_mode_fill_fb_struct(ddev, &fb->base, cmd);

	error = drm_framebuffer_init(ddev, &fb->base, &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_put_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

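/*
 * Look under /chosen for an enabled "simple-framebuffer" node left
 * behind by the bootloader and return its register range, expanded to
 * whole pages, so that the memory can be reclaimed into the CMA pool.
 */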
static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const struct device_compatible_entry simplefb_compat[] = {
		{ .compat = "simple-framebuffer" },
		DEVICE_COMPAT_EOL
	};
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_compatible_match(child, simplefb_compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}

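/*
 * fbdev helper probe: size and create the CMA pool (adding any memory
 * reclaimed from the bootloader framebuffer), allocate a GEM CMA
 * object for the console surface, and attach the framebuffer device
 * on the "sunxifbbus" interface attribute.
 */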
static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->format = drm_format_info(DRM_FORMAT_XRGB8888);
	fb->dev = ddev;

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found(ddev->dev, &sfa, NULL,
	    CFARGS(.iattr = "sunxifbbus"));
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

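/*
 * DRM .load hook: initialize mode configuration, activate the
 * endpoints listed in the "allwinner,pipelines" property, then set up
 * the fbdev helper and vblank handling for the discovered CRTCs.
 */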
static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}

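/*
 * Vblank hooks: dispatch to the per-CRTC callbacks recorded in
 * sc_vbl[].  A missing callback is treated as a no-op.
 */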
static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static void
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);
}

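/*
 * Endpoint registry.  Pipeline components register their FDT endpoints
 * here; sunxi_drm_load() walks this list and activates the endpoints
 * that belong to the pipelines listed in "allwinner,pipelines".
 */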
int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}

static void
sunxi_drm_task_work(struct work *work, void *cookie)
{
	struct sunxi_drm_task *task = container_of(work, struct sunxi_drm_task,
	    sdt_u.work);

	(*task->sdt_fn)(task);
}

void
sunxi_task_init(struct sunxi_drm_task *task,
    void (*fn)(struct sunxi_drm_task *))
{

	task->sdt_fn = fn;
}

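/*
 * Schedule a task: while attach is still running in sc_task_thread the
 * task is queued on sc_tasks and run at the end of sunxi_drm_init();
 * afterwards it is dispatched to the workqueue.
 */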
void
sunxi_task_schedule(device_t self, struct sunxi_drm_task *task)
{
	struct sunxi_drm_softc *sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, sdt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->sdt_u.work, NULL);
}