1 /* 2 * Copyright © 2007 David Airlie 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *     David Airlie
 */

#include <linux/async.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>

#include "gem/i915_gem_lmem.h"

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"

/* Return the frontbuffer tracking object of the fbdev's framebuffer. */
static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
	return ifbdev->fb->frontbuffer;
}

/*
 * Report a CPU-originated write to the fbdev framebuffer to the
 * frontbuffer tracking machinery (e.g. so FBC/PSR can react).
 */
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
	intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}

/*
 * fb_ops.fb_set_par hook: let the generic helper apply the mode, then
 * flag the frontbuffer dirty on success (the console may scribble on it).
 * Returns 0 on success or the helper's negative error code.
 */
static int intel_fbdev_set_par(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct intel_fbdev *ifbdev =
		container_of(fb_helper, struct intel_fbdev, helper);
	int ret;

	ret = drm_fb_helper_set_par(info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

/*
 * fb_ops.fb_blank hook: delegate to the generic helper and invalidate
 * the frontbuffer on success, mirroring intel_fbdev_set_par().
 */
static int intel_fbdev_blank(int blank, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct intel_fbdev *ifbdev =
		container_of(fb_helper, struct intel_fbdev, helper);
	int ret;

	ret = drm_fb_helper_blank(blank, info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

/*
 * fb_ops.fb_pan_display hook: delegate panning to the generic helper and
 * invalidate the frontbuffer on success.
 */
static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
				   struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct intel_fbdev *ifbdev =
		container_of(fb_helper, struct intel_fbdev, helper);
	int ret;

	ret = drm_fb_helper_pan_display(var, info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

static const
struct fb_ops intelfb_ops = {
	/* "notyet" guards: Linux-only fields not wired up in this port. */
#ifdef notyet
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
#endif
	.fb_set_par = intel_fbdev_set_par,
#ifdef notyet
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
	.fb_pan_display = intel_fbdev_pan_display,
	.fb_blank = intel_fbdev_blank,
#endif
};

/*
 * Allocate a GEM object and wrap it in a framebuffer for fbdev use.
 *
 * Backing-store preference: local memory on discrete parts
 * (contiguous, so the CPU aperture mapping below works), otherwise
 * stolen memory if the fb fits comfortably, with shmem as the fallback.
 * On success stores the new fb in ifbdev->fb and returns 0; returns a
 * negative errno on failure.
 */
static int intelfb_alloc(struct drm_fb_helper *helper,
			 struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj;
	int size;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* Stride in bytes, rounded up to a 64-byte boundary. */
	mode_cmd.pitches[0] = roundup2(mode_cmd.width *
				       DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = PAGE_ALIGN(size);

	obj = ERR_PTR(-ENODEV);
	if (HAS_LMEM(dev_priv)) {
		obj = i915_gem_object_create_lmem(dev_priv, size,
						  I915_BO_ALLOC_CONTIGUOUS);
	} else {
		/*
		 * If the FB is too big, just don't use it since fbdev is not very
		 * important and we should probably use that space with FBC or other
		 * features.
		 */
		if (size * 2 < dev_priv->stolen_usable_size)
			obj = i915_gem_object_create_stolen(dev_priv, size);
		if (IS_ERR(obj))
			obj = i915_gem_object_create_shmem(dev_priv, size);
	}

	if (IS_ERR(obj)) {
		drm_err(&dev_priv->drm, "failed to allocate framebuffer\n");
		return PTR_ERR(obj);
	}

	fb = intel_framebuffer_create(obj, &mode_cmd);
	/* The framebuffer holds its own reference now; drop ours. */
	i915_gem_object_put(obj);
	if (IS_ERR(fb))
		return PTR_ERR(fb);

	ifbdev->fb = to_intel_framebuffer(fb);
	return 0;
}

/*
 * drm_fb_helper_funcs.fb_probe hook: create (or re-use a BIOS-inherited)
 * framebuffer, pin it into the GGTT, map it for CPU access and fill in
 * the fb_info / console state.
 *
 * The Linux path sets up fb_info apertures and screen_base; the OpenBSD
 * path (#else) programs the rasops console instead. Returns 0 on success
 * or a negative errno, unwinding the pin and the runtime-PM wakeref.
 */
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	const struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_NORMAL,
	};
	intel_wakeref_t wakeref;
	struct fb_info *info;
	struct i915_vma *vma;
	unsigned long flags = 0;
	bool prealloc = false;
	void __iomem *vaddr;
	struct drm_i915_gem_object *obj;
	int ret;

	/* A BIOS fb smaller than the requested console is useless: drop it. */
	if (intel_fb &&
	    (sizes->fb_width > intel_fb->base.width ||
	     sizes->fb_height > intel_fb->base.height)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS fb too small (%dx%d), we require (%dx%d),"
			    " releasing it\n",
			    intel_fb->base.width, intel_fb->base.height,
			    sizes->fb_width, sizes->fb_height);
		drm_framebuffer_put(&intel_fb->base);
		intel_fb = ifbdev->fb = NULL;
	}
	if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
		drm_dbg_kms(&dev_priv->drm,
			    "no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			return ret;
		intel_fb = ifbdev->fb;
	} else {
		drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
		prealloc = true;
		/* Clamp the console to the inherited fb's dimensions. */
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/* Pin the GGTT vma for our access via info->screen_base.
	 * This also validates that any existing fb inherited from the
	 * BIOS is suitable for own access.
	 */
	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, false,
					 &view, false, &flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_unlock;
	}

	intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
		ret = PTR_ERR(info);
		goto out_unpin;
	}

	ifbdev->helper.fb = &ifbdev->fb->base;

	info->fbops = &intelfb_ops;

#ifdef __linux__
	/* setup aperture base/size for vesafb takeover */
	obj = intel_fb_obj(&intel_fb->base);
	if (i915_gem_object_is_lmem(obj)) {
		struct intel_memory_region *mem = obj->mm.region;

		info->apertures->ranges[0].base = mem->io_start;
		info->apertures->ranges[0].size = mem->total;

		/* Use fbdev's framebuffer from lmem for discrete */
		info->fix.smem_start =
			(unsigned long)(mem->io_start +
					i915_gem_object_get_dma_address(obj, 0));
		info->fix.smem_len = obj->base.size;
	} else {
		info->apertures->ranges[0].base = ggtt->gmadr.start;
		info->apertures->ranges[0].size = ggtt->mappable_end;

		/* Our framebuffer is the entirety of fbdev's system memory */
		info->fix.smem_start =
			(unsigned long)(ggtt->gmadr.start + vma->node.start);
		info->fix.smem_len = vma->node.size;
	}

	vaddr = i915_vma_pin_iomap(vma);
	if (IS_ERR(vaddr)) {
		drm_err(&dev_priv->drm,
			"Failed to remap framebuffer into virtual memory\n");
		ret = PTR_ERR(vaddr);
		goto out_unpin;
	}
	info->screen_base = vaddr;
	info->screen_size = vma->node.size;

	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
#else
	/* OpenBSD: hook the mapped framebuffer up to the rasops console. */
	{
		struct drm_framebuffer *fb = ifbdev->helper.fb;
		struct rasops_info *ri = &dev_priv->ro;
		/* NOTE(review): bsh and err are unused in this chunk. */
		bus_space_handle_t bsh;
		int err;

		vaddr = i915_vma_pin_iomap(vma);
		if (IS_ERR(vaddr)) {
			DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
			ret = PTR_ERR(vaddr);
			goto out_unpin;
		}

		drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);

		ri->ri_bits = vaddr;
		ri->ri_depth = fb->format->cpp[0] * 8;
		ri->ri_stride = fb->pitches[0];
		ri->ri_width = sizes->fb_width;
		ri->ri_height = sizes->fb_height;

		/* Describe the channel layout of the two supported formats. */
		switch (fb->format->format) {
		case DRM_FORMAT_XRGB8888:
			ri->ri_rnum = 8;
			ri->ri_rpos = 16;
			ri->ri_gnum = 8;
			ri->ri_gpos = 8;
			ri->ri_bnum = 8;
			ri->ri_bpos = 0;
			break;
		case DRM_FORMAT_RGB565:
			ri->ri_rnum = 5;
			ri->ri_rpos = 11;
			ri->ri_gnum = 6;
			ri->ri_gpos = 5;
			ri->ri_bnum = 5;
			ri->ri_bpos = 0;
			break;
		}

		/* Stolen memory is not zeroed; clear a freshly allocated fb. */
		if (vma->obj->stolen && !prealloc)
			memset(ri->ri_bits, 0, vma->node.size);
	}
#endif

	drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
		    ifbdev->fb->base.width, ifbdev->fb->base.height,
		    i915_ggtt_offset(vma));
	ifbdev->vma = vma;
	ifbdev->vma_flags = flags;

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	vga_switcheroo_client_fb_set(pdev, info);
	return 0;

out_unpin:
	intel_unpin_fb_vma(vma, flags);
out_unlock:
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

static const
struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.fb_probe = intelfb_create,
};

/*
 * Tear down an intel_fbdev: unregister the helper, unpin the fb vma if
 * one was pinned by intelfb_create(), drop the framebuffer and free the
 * structure itself.
 */
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
{
	/* We rely on the object-free to release the VMA pinning for
	 * the info->screen_base mmaping. Leaking the VMA is simpler than
	 * trying to rectify all the possible error paths leading here.
	 */

	drm_fb_helper_fini(&ifbdev->helper);

	if (ifbdev->vma)
		intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags);

	if (ifbdev->fb)
		drm_framebuffer_remove(&ifbdev->fb->base);

	kfree(ifbdev);
}

/*
 * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
 * The core display code will have read out the current plane configuration,
 * so we use that to figure out if there's an object for us to use as the
 * fb, and if so, we re-use it for the fbdev configuration.
 *
 * Note we only support a single fb shared across pipes for boot (mostly for
 * fbcon), so we just find the biggest and use that.
 */
static bool intel_fbdev_init_bios(struct drm_device *dev,
				  struct intel_fbdev *ifbdev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_framebuffer *fb = NULL;
	struct intel_crtc *crtc;
	unsigned int max_size = 0;

	/* Find the largest fb */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct drm_i915_gem_object *obj =
			intel_fb_obj(plane_state->uapi.fb);

		if (!crtc_state->uapi.active) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] not active, skipping\n",
				    crtc->base.base.id, crtc->base.name);
			continue;
		}

		if (!obj) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no fb, skipping\n",
				    plane->base.base.id, plane->base.name);
			continue;
		}

		if (obj->base.size > max_size) {
			drm_dbg_kms(&i915->drm,
				    "found possible fb from [PLANE:%d:%s]\n",
				    plane->base.base.id, plane->base.name);
			fb = to_intel_framebuffer(plane_state->uapi.fb);
			max_size = obj->base.size;
		}
	}

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "no active fbs found, not using BIOS config\n");
		goto out;
	}

	/* Now make sure all the pipes will fit into it */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		unsigned int cur_size;

		if (!crtc_state->uapi.active) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] not active, skipping\n",
				    crtc->base.base.id, crtc->base.name);
			continue;
		}

		drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
			    plane->base.base.id, plane->base.name);

		/*
		 * See if the plane fb we found above will fit on this
		 * pipe. Note we need to use the selected fb's pitch and bpp
		 * rather than the current pipe's, since they differ.
		 */
		cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
		cur_size = cur_size * fb->base.format->cpp[0];
		if (fb->base.pitches[0] < cur_size) {
			drm_dbg_kms(&i915->drm,
				    "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
				    plane->base.base.id, plane->base.name,
				    cur_size, fb->base.pitches[0]);
			fb = NULL;
			break;
		}

		cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
		cur_size *= fb->base.pitches[0];
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
			    crtc->base.base.id, crtc->base.name,
			    crtc_state->uapi.adjusted_mode.crtc_hdisplay,
			    crtc_state->uapi.adjusted_mode.crtc_vdisplay,
			    fb->base.format->cpp[0] * 8,
			    cur_size);

		if (cur_size > max_size) {
			drm_dbg_kms(&i915->drm,
				    "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
				    plane->base.base.id, plane->base.name,
				    cur_size, max_size);
			fb = NULL;
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "fb big enough [PLANE:%d:%s] (%d >= %d)\n",
			    plane->base.base.id, plane->base.name,
			    max_size, cur_size);
	}

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "BIOS fb not suitable for all pipes, not using\n");
		goto out;
	}

	ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
	ifbdev->fb = fb;

	/* Hold our own reference for the lifetime of the fbdev. */
	drm_framebuffer_get(&ifbdev->fb->base);

	/* Final pass to check if any active pipes don't have fbs */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (!crtc_state->uapi.active)
			continue;

		drm_WARN(dev, !plane_state->uapi.fb,
			 "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
			 plane->base.base.id, plane->base.name);
	}

	drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
	return true;

out:
	return false;
}

/*
 * Deferred-resume worker: re-enable the fbdev console without holding up
 * whoever scheduled the work (see intel_fbdev_set_suspend()).
 */
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
	intel_fbdev_set_suspend(&container_of(work,
					      struct drm_i915_private,
					      fbdev_suspend_work)->drm,
				FBINFO_STATE_RUNNING,
				true);
}

/*
 * Allocate and register the intel_fbdev for a device, attempting to
 * inherit the BIOS framebuffer first. Returns 0 or a negative errno.
 */
int intel_fbdev_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev;
	int ret;

	if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv)))
		return -ENODEV;

	ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
	if (ifbdev == NULL)
		return -ENOMEM;

	rw_init(&ifbdev->hpd_lock, "hdplk");
	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);

	/* No usable BIOS fb: fall back to a 32bpp console. */
	if (!intel_fbdev_init_bios(dev, ifbdev))
		ifbdev->preferred_bpp = 32;

	ret = drm_fb_helper_init(dev, &ifbdev->helper);
	if (ret) {
		kfree(ifbdev);
		return ret;
	}

	dev_priv->fbdev = ifbdev;
	INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);

	return 0;
}

/* Async callback that performs the initial fbdev configuration. */
static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
	struct intel_fbdev *ifbdev = data;

	/* Due to peculiar init order wrt to hpd handling this is separate. */
	if (drm_fb_helper_initial_config(&ifbdev->helper,
					 ifbdev->preferred_bpp))
		intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}

/* Kick off the initial fbdev configuration asynchronously. */
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;

	if (!ifbdev)
		return;

	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}

/*
 * Wait for the async initial-config call to finish. No-op on OpenBSD,
 * where the async cookie machinery is not used.
 */
static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
{
#ifdef __linux__
	if (!ifbdev->cookie)
		return;

	/* Only serialises with all preceding async calls, hence +1 */
	async_synchronize_cookie(ifbdev->cookie + 1);
	ifbdev->cookie = 0;
#endif
}

/*
 * Unregister the fbdev from userspace visibility. Flushes pending
 * suspend work and (on Linux) waits out the async initial config, unless
 * we are that async context ourselves.
 */
void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
{
	struct intel_fbdev *ifbdev = dev_priv->fbdev;

	if (!ifbdev)
		return;

	cancel_work_sync(&dev_priv->fbdev_suspend_work);
#ifdef __linux__
	if (!current_is_async())
		intel_fbdev_sync(ifbdev);
#endif

	drm_fb_helper_unregister_fbi(&ifbdev->helper);
}

/* Final teardown counterpart of intel_fbdev_init(). */
void intel_fbdev_fini(struct drm_i915_private *dev_priv)
{
	struct intel_fbdev *ifbdev = fetch_and_zero(&dev_priv->fbdev);

	if (!ifbdev)
		return;

	intel_fbdev_destroy(ifbdev);
}

/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
 * processing, fbdev will perform a full connector reprobe if a hotplug event
 * was received while HPD was suspended.
 */
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
	struct intel_fbdev *ifbdev = i915->fbdev;
	bool send_hpd = false;

	mutex_lock(&ifbdev->hpd_lock);
	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
	/* Deliver a hotplug event that arrived while we were suspended. */
	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
	ifbdev->hpd_waiting = false;
	mutex_unlock(&ifbdev->hpd_lock);

	if (send_hpd) {
		drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
		drm_fb_helper_hotplug_event(&ifbdev->helper);
	}
}

/*
 * Suspend or resume the fbdev console. @synchronous selects between
 * taking the console lock directly (suspend/unload path) and deferring
 * the work to a workqueue when the lock is contended (resume path).
 * Compiled out entirely on OpenBSD.
 */
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
#ifdef __linux__
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->fbdev;
	struct fb_info *info;

	if (!ifbdev || !ifbdev->vma)
		return;

	info = ifbdev->helper.fbdev;

	if (synchronous) {
		/* Flush any pending work to turn the console on, and then
		 * wait to turn it off. It must be synchronous as we are
		 * about to suspend or unload the driver.
		 *
		 * Note that from within the work-handler, we cannot flush
		 * ourselves, so only flush outstanding work upon suspend!
		 */
		if (state != FBINFO_STATE_RUNNING)
			flush_work(&dev_priv->fbdev_suspend_work);

		console_lock();
	} else {
		/*
		 * The console lock can be pretty contented on resume due
		 * to all the printk activity. Try to keep it out of the hot
		 * path of resume if possible.
		 */
		drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
		if (!console_trylock()) {
			/* Don't block our own workqueue as this can
			 * be run in parallel with other i915.ko tasks.
			 */
			schedule_work(&dev_priv->fbdev_suspend_work);
			return;
		}
	}

	/* On resume from hibernation: If the object is shmemfs backed, it has
	 * been restored from swap. If the object is stolen however, it will be
	 * full of whatever garbage was left in there.
	 */
	if (state == FBINFO_STATE_RUNNING &&
	    !i915_gem_object_is_shmem(intel_fb_obj(&ifbdev->fb->base)))
		memset_io(info->screen_base, 0, info->screen_size);

	drm_fb_helper_set_suspend(&ifbdev->helper, state);
	console_unlock();

	intel_fbdev_hpd_set_suspend(dev_priv, state);
#endif
}

/*
 * Output-poll/hotplug notification: forward to the fb helper, or record
 * the event for later delivery if HPD processing is suspended.
 */
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
	bool send_hpd;

	if (!ifbdev)
		return;

	intel_fbdev_sync(ifbdev);

	mutex_lock(&ifbdev->hpd_lock);
	send_hpd = !ifbdev->hpd_suspended;
	ifbdev->hpd_waiting = true;
	mutex_unlock(&ifbdev->hpd_lock);

	/* Only forward once the fbdev is (or is about to be) set up. */
	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
		drm_fb_helper_hotplug_event(&ifbdev->helper);
}

/*
 * Restore the fbdev console mode (e.g. after a client crash or VT
 * switch) and mark the frontbuffer dirty if the restore succeeded.
 */
void intel_fbdev_restore_mode(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;

	if (!ifbdev)
		return;

	intel_fbdev_sync(ifbdev);
	if (!ifbdev->vma)
		return;

	if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
		intel_fbdev_invalidate(ifbdev);
}