/* $NetBSD: sunxi_drm.c,v 1.11 2019/12/16 12:40:17 jmcneill Exp $ */

/*-
 * Copyright (c) 2019 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sunxi_drm.c,v 1.11 2019/12/16 12:40:17 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_device.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include <dev/fdt/fdtvar.h>
#include <dev/fdt/fdt_port.h>

#include <arm/sunxi/sunxi_drm.h>

#define SUNXI_DRM_MAX_WIDTH	3840
#define SUNXI_DRM_MAX_HEIGHT	2160

/*
 * The DRM headers break the trunc_page/round_page macros with a redefinition
 * of PAGE_MASK. Use our own macros instead.
 */
#define SUNXI_PAGE_MASK		(PAGE_SIZE - 1)
#define SUNXI_TRUNC_PAGE(x)	((x) & ~SUNXI_PAGE_MASK)
#define SUNXI_ROUND_PAGE(x)	(((x) + SUNXI_PAGE_MASK) & ~SUNXI_PAGE_MASK)
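
/*
 * Worked example (assuming the usual 4 KB PAGE_SIZE): SUNXI_TRUNC_PAGE(0x12345)
 * yields 0x12000 and SUNXI_ROUND_PAGE(0x12345) yields 0x13000, so the simplefb
 * reclaim code below widens the bootloader framebuffer region to whole pages.
 */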

static TAILQ_HEAD(, sunxi_drm_endpoint) sunxi_drm_endpoints =
    TAILQ_HEAD_INITIALIZER(sunxi_drm_endpoints);

static const char * const compatible[] = {
	"allwinner,sun8i-h3-display-engine",
	"allwinner,sun50i-a64-display-engine",
	NULL
};

static const char * fb_compatible[] = {
	"allwinner,simple-framebuffer",
	NULL
};

static int	sunxi_drm_match(device_t, cfdata_t, void *);
static void	sunxi_drm_attach(device_t, device_t, void *);

static void	sunxi_drm_init(device_t);
static vmem_t	*sunxi_drm_alloc_cma_pool(struct drm_device *, size_t);

static int	sunxi_drm_set_busid(struct drm_device *, struct drm_master *);

static uint32_t	sunxi_drm_get_vblank_counter(struct drm_device *, unsigned int);
static int	sunxi_drm_enable_vblank(struct drm_device *, unsigned int);
static void	sunxi_drm_disable_vblank(struct drm_device *, unsigned int);

static int	sunxi_drm_load(struct drm_device *, unsigned long);
static int	sunxi_drm_unload(struct drm_device *);

static struct drm_driver sunxi_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
	.dev_priv_size = 0,
	.load = sunxi_drm_load,
	.unload = sunxi_drm_unload,

	.gem_free_object = drm_gem_cma_free_object,
	.mmap_object = drm_gem_or_legacy_mmap_object,
	.gem_uvm_ops = &drm_gem_cma_uvm_ops,

	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_dumb_destroy,

	.get_vblank_counter = sunxi_drm_get_vblank_counter,
	.enable_vblank = sunxi_drm_enable_vblank,
	.disable_vblank = sunxi_drm_disable_vblank,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	.set_busid = sunxi_drm_set_busid,
};

CFATTACH_DECL_NEW(sunxi_drm, sizeof(struct sunxi_drm_softc),
	sunxi_drm_match, sunxi_drm_attach, NULL, NULL);

static int
sunxi_drm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_match_compatible(faa->faa_phandle, compatible);
}

static void
sunxi_drm_attach(device_t parent, device_t self, void *aux)
{
	struct sunxi_drm_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	struct drm_driver * const driver = &sunxi_drm_driver;
	prop_dictionary_t dict = device_properties(self);
	bool is_disabled;

	sc->sc_dev = self;
	sc->sc_dmat = faa->faa_dmat;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");

	if (prop_dictionary_get_bool(dict, "disabled", &is_disabled) && is_disabled) {
		aprint_normal(": Display Engine Pipeline (disabled)\n");
		return;
	}

	aprint_normal(": Display Engine Pipeline\n");

	sc->sc_ddev = drm_dev_alloc(driver, sc->sc_dev);
	if (sc->sc_ddev == NULL) {
		aprint_error_dev(self, "couldn't allocate DRM device\n");
		return;
	}
	sc->sc_ddev->dev_private = sc;
	sc->sc_ddev->bst = sc->sc_bst;
	sc->sc_ddev->bus_dmat = sc->sc_dmat;
	sc->sc_ddev->dmat = sc->sc_ddev->bus_dmat;
	sc->sc_ddev->dmat_subregion_p = false;

	fdt_remove_bycompat(fb_compatible);

	config_defer(self, sunxi_drm_init);
}
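
/*
 * Deferred attach hook.  sunxi_drm_attach() queues this with config_defer(9),
 * so it is expected to run only after the parent bus has finished attaching
 * its children, giving the CRTC/encoder drivers in the pipeline a chance to
 * call sunxi_drm_register_endpoint() before the DRM device is registered.
 */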
static void
sunxi_drm_init(device_t dev)
{
	struct sunxi_drm_softc * const sc = device_private(dev);
	struct drm_driver * const driver = &sunxi_drm_driver;
	int error;

	error = -drm_dev_register(sc->sc_ddev, 0);
	if (error) {
		drm_dev_unref(sc->sc_ddev);
		aprint_error_dev(dev, "couldn't register DRM device: %d\n",
		    error);
		return;
	}

	aprint_normal_dev(dev, "initialized %s %d.%d.%d %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, sc->sc_ddev->primary->index);
}

static vmem_t *
sunxi_drm_alloc_cma_pool(struct drm_device *ddev, size_t cma_size)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	bus_dma_segment_t segs[1];
	int nsegs;
	int error;

	error = bus_dmamem_alloc(sc->sc_dmat, cma_size, PAGE_SIZE, 0,
	    segs, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "couldn't allocate CMA pool\n");
		return NULL;
	}

	return vmem_create("sunxidrm", segs[0].ds_addr, segs[0].ds_len,
	    PAGE_SIZE, NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
}

static int
sunxi_drm_set_busid(struct drm_device *ddev, struct drm_master *master)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	char id[32];

	snprintf(id, sizeof(id), "platform:sunxi:%u", device_unit(sc->sc_dev));

	master->unique = kzalloc(strlen(id) + 1, GFP_KERNEL);
	if (master->unique == NULL)
		return -ENOMEM;
	strcpy(master->unique, id);
	master->unique_len = strlen(master->unique);

	return 0;
}

static int
sunxi_drm_fb_create_handle(struct drm_framebuffer *fb,
    struct drm_file *file, unsigned int *handle)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	return drm_gem_handle_create(file, &sfb->obj->base, handle);
}

static void
sunxi_drm_fb_destroy(struct drm_framebuffer *fb)
{
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&sfb->obj->base);
	kmem_free(sfb, sizeof(*sfb));
}

static const struct drm_framebuffer_funcs sunxi_drm_framebuffer_funcs = {
	.create_handle = sunxi_drm_fb_create_handle,
	.destroy = sunxi_drm_fb_destroy,
};

static struct drm_framebuffer *
sunxi_drm_fb_create(struct drm_device *ddev, struct drm_file *file,
    struct drm_mode_fb_cmd2 *cmd)
{
	struct sunxi_drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int error;

	if (cmd->flags)
		return NULL;

	gem_obj = drm_gem_object_lookup(ddev, file, cmd->handles[0]);
	if (gem_obj == NULL)
		return NULL;

	fb = kmem_zalloc(sizeof(*fb), KM_SLEEP);
	fb->obj = to_drm_gem_cma_obj(gem_obj);
	fb->base.pitches[0] = cmd->pitches[0];
	fb->base.pitches[1] = cmd->pitches[1];
	fb->base.pitches[2] = cmd->pitches[2];
	fb->base.offsets[0] = cmd->offsets[0];
	fb->base.offsets[1] = cmd->offsets[1];
	fb->base.offsets[2] = cmd->offsets[2];
	fb->base.width = cmd->width;
	fb->base.height = cmd->height;
	fb->base.pixel_format = cmd->pixel_format;
	fb->base.bits_per_pixel = drm_format_plane_cpp(fb->base.pixel_format, 0) * 8;

	switch (fb->base.pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb->base.depth = 32;
		break;
	default:
		break;
	}

	error = drm_framebuffer_init(ddev, &fb->base,
	    &sunxi_drm_framebuffer_funcs);
	if (error != 0)
		goto dealloc;

	return &fb->base;

dealloc:
	drm_framebuffer_cleanup(&fb->base);
	kmem_free(fb, sizeof(*fb));
	drm_gem_object_unreference_unlocked(gem_obj);

	return NULL;
}

static struct drm_mode_config_funcs sunxi_drm_mode_config_funcs = {
	.fb_create = sunxi_drm_fb_create,
};

static int
sunxi_drm_simplefb_lookup(bus_addr_t *paddr, bus_size_t *psize)
{
	static const char * compat[] = { "simple-framebuffer", NULL };
	int chosen, child, error;
	bus_addr_t addr_end;

	chosen = OF_finddevice("/chosen");
	if (chosen == -1)
		return ENOENT;

	for (child = OF_child(chosen); child; child = OF_peer(child)) {
		if (!fdtbus_status_okay(child))
			continue;
		if (!of_match_compatible(child, compat))
			continue;
		error = fdtbus_get_reg(child, 0, paddr, psize);
		if (error != 0)
			return error;

		/* Reclaim entire pages used by the simplefb */
		addr_end = *paddr + *psize;
		*paddr = SUNXI_TRUNC_PAGE(*paddr);
		*psize = SUNXI_ROUND_PAGE(addr_end - *paddr);
		return 0;
	}

	return ENOENT;
}
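
/*
 * Worked example for the CMA reservation below: a full 3840x2160 plane at
 * 4 bytes per pixel needs 3840 * 2160 * 4 = 33177600 bytes (~31.7 MB), which
 * rounds up to a 32 MB pool; the fbcon framebuffer is added on top only when
 * no simplefb memory could be reclaimed from the bootloader.
 */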
static int
sunxi_drm_fb_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(helper->dev);
	struct drm_device *ddev = helper->dev;
	struct sunxi_drm_framebuffer *sfb = to_sunxi_drm_framebuffer(helper->fb);
	struct drm_framebuffer *fb = helper->fb;
	struct sunxi_drmfb_attach_args sfa;
	bus_addr_t sfb_addr;
	bus_size_t sfb_size;
	size_t cma_size;
	int error;

	const u_int width = sizes->surface_width;
	const u_int height = sizes->surface_height;
	const u_int pitch = width * (32 / 8);

	const size_t size = roundup(height * pitch, PAGE_SIZE);

	if (sunxi_drm_simplefb_lookup(&sfb_addr, &sfb_size) != 0)
		sfb_size = 0;

	/* Reserve enough memory for a 4K plane, rounded to 1MB */
	cma_size = (SUNXI_DRM_MAX_WIDTH * SUNXI_DRM_MAX_HEIGHT * 4);
	if (sfb_size == 0) {
		/* Add memory for FB console if we cannot reclaim bootloader memory */
		cma_size += size;
	}
	cma_size = roundup(cma_size, 1024 * 1024);
	sc->sc_ddev->cma_pool = sunxi_drm_alloc_cma_pool(sc->sc_ddev, cma_size);
	if (sc->sc_ddev->cma_pool != NULL) {
		if (sfb_size != 0) {
			error = vmem_add(sc->sc_ddev->cma_pool, sfb_addr,
			    sfb_size, VM_SLEEP);
			if (error != 0)
				sfb_size = 0;
		}
		aprint_normal_dev(sc->sc_dev, "reserved %u MB DRAM for CMA",
		    (u_int)((cma_size + sfb_size) / (1024 * 1024)));
		if (sfb_size != 0)
			aprint_normal(" (%u MB reclaimed from bootloader)",
			    (u_int)(sfb_size / (1024 * 1024)));
		aprint_normal("\n");
	}

	sfb->obj = drm_gem_cma_create(ddev, size);
	if (sfb->obj == NULL) {
		DRM_ERROR("failed to allocate memory for framebuffer\n");
		return -ENOMEM;
	}

	fb->pitches[0] = pitch;
	fb->offsets[0] = 0;
	fb->width = width;
	fb->height = height;
	fb->pixel_format = DRM_FORMAT_XRGB8888;
	drm_fb_get_bpp_depth(fb->pixel_format, &fb->depth, &fb->bits_per_pixel);

	error = drm_framebuffer_init(ddev, fb, &sunxi_drm_framebuffer_funcs);
	if (error != 0) {
		DRM_ERROR("failed to initialize framebuffer\n");
		return error;
	}

	memset(&sfa, 0, sizeof(sfa));
	sfa.sfa_drm_dev = ddev;
	sfa.sfa_fb_helper = helper;
	sfa.sfa_fb_sizes = *sizes;
	sfa.sfa_fb_bst = sc->sc_bst;
	sfa.sfa_fb_dmat = sc->sc_dmat;
	sfa.sfa_fb_linebytes = helper->fb->pitches[0];

	helper->fbdev = config_found_ia(ddev->dev, "sunxifbbus", &sfa, NULL);
	if (helper->fbdev == NULL) {
		DRM_ERROR("unable to attach framebuffer\n");
		return -ENXIO;
	}

	return 0;
}

static struct drm_fb_helper_funcs sunxi_drm_fb_helper_funcs = {
	.fb_probe = sunxi_drm_fb_probe,
};

static int
sunxi_drm_load(struct drm_device *ddev, unsigned long flags)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);
	struct sunxi_drm_endpoint *sep;
	struct sunxi_drm_fbdev *fbdev;
	const u_int *data;
	int datalen, error, num_crtc;

	drm_mode_config_init(ddev);
	ddev->mode_config.min_width = 0;
	ddev->mode_config.min_height = 0;
	ddev->mode_config.max_width = SUNXI_DRM_MAX_WIDTH;
	ddev->mode_config.max_height = SUNXI_DRM_MAX_HEIGHT;
	ddev->mode_config.funcs = &sunxi_drm_mode_config_funcs;

	num_crtc = 0;
	data = fdtbus_get_prop(sc->sc_phandle, "allwinner,pipelines", &datalen);
	while (datalen >= 4) {
		const int crtc_phandle = fdtbus_get_phandle_from_native(be32dec(data));

		TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
			if (sep->phandle == crtc_phandle && sep->ddev == NULL) {
				sep->ddev = ddev;
				error = fdt_endpoint_activate_direct(sep->ep, true);
				if (error != 0) {
					aprint_error_dev(sc->sc_dev, "failed to activate endpoint: %d\n",
					    error);
				}
				if (fdt_endpoint_type(sep->ep) == EP_DRM_CRTC)
					num_crtc++;
			}

		datalen -= 4;
		data++;
	}

	if (num_crtc == 0) {
		aprint_error_dev(sc->sc_dev, "no pipelines configured\n");
		error = ENXIO;
		goto drmerr;
	}

	fbdev = kmem_zalloc(sizeof(*fbdev), KM_SLEEP);

	drm_fb_helper_prepare(ddev, &fbdev->helper, &sunxi_drm_fb_helper_funcs);

	error = drm_fb_helper_init(ddev, &fbdev->helper, num_crtc, num_crtc);
	if (error)
		goto allocerr;

	fbdev->helper.fb = kmem_zalloc(sizeof(struct sunxi_drm_framebuffer), KM_SLEEP);

	drm_fb_helper_single_add_all_connectors(&fbdev->helper);

	drm_helper_disable_unused_functions(ddev);

	drm_fb_helper_initial_config(&fbdev->helper, 32);

	/* XXX */
	ddev->irq_enabled = true;
	drm_vblank_init(ddev, num_crtc);

	return 0;

allocerr:
	kmem_free(fbdev, sizeof(*fbdev));
drmerr:
	drm_mode_config_cleanup(ddev);

	return error;
}
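
/*
 * Vblank callbacks.  These only dispatch: the per-CRTC hooks in sc_vbl[] are
 * expected to be filled in by the CRTC drivers in the pipeline, and a slot
 * that is still NULL is simply treated as "no vblank support" for that index.
 */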
static uint32_t
sunxi_drm_get_vblank_counter(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].get_vblank_counter == NULL)
		return 0;

	return sc->sc_vbl[crtc].get_vblank_counter(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_enable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return 0;

	if (sc->sc_vbl[crtc].enable_vblank == NULL)
		return 0;

	sc->sc_vbl[crtc].enable_vblank(sc->sc_vbl[crtc].priv);

	return 0;
}

static void
sunxi_drm_disable_vblank(struct drm_device *ddev, unsigned int crtc)
{
	struct sunxi_drm_softc * const sc = sunxi_drm_private(ddev);

	if (crtc >= __arraycount(sc->sc_vbl))
		return;

	if (sc->sc_vbl[crtc].disable_vblank == NULL)
		return;

	sc->sc_vbl[crtc].disable_vblank(sc->sc_vbl[crtc].priv);
}

static int
sunxi_drm_unload(struct drm_device *ddev)
{
	drm_mode_config_cleanup(ddev);

	return 0;
}

int
sunxi_drm_register_endpoint(int phandle, struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	sep = kmem_zalloc(sizeof(*sep), KM_SLEEP);
	sep->phandle = phandle;
	sep->ep = ep;
	sep->ddev = NULL;
	TAILQ_INSERT_TAIL(&sunxi_drm_endpoints, sep, entries);

	return 0;
}

struct drm_device *
sunxi_drm_endpoint_device(struct fdt_endpoint *ep)
{
	struct sunxi_drm_endpoint *sep;

	TAILQ_FOREACH(sep, &sunxi_drm_endpoints, entries)
		if (sep->ep == ep)
			return sep->ddev;

	return NULL;
}