/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>

#include <dev/pci/agpvar.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_userptr.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"

static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
	int err;

	err = mutex_lock_interruptible(&ggtt->vm.mutex);
	if (err)
		return err;

	memset(node, 0, sizeof(*node));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					  size, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);

	mutex_unlock(&ggtt->vm.mutex);

	return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
	mutex_lock(&ggtt->vm.mutex);
	drm_mm_remove_node(node);
	mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	if (mutex_lock_interruptible(&ggtt->vm.mutex))
		return -EINTR;

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

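/*
 * Attempt to unbind all VMAs attached to @obj, subject to the
 * I915_GEM_OBJECT_UNBIND_* restrictions passed in @flags (test only,
 * active vmas, async unbind, vm trylock, barrier). Returns 0 on success
 * or a negative error code if a vma could not be unbound.
 */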
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
			   unsigned long flags)
{
	struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
	bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
	DRM_LIST_HEAD(still_in_list);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	int ret;

	assert_object_held(obj);

	if (list_empty(&obj->vma.list))
		return 0;

	/*
	 * As some machines use ACPI to handle runtime-resume callbacks, and
	 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
	 * as they are required by the shrinker. Ergo, we wake the device up
	 * first just in case.
	 */
	wakeref = intel_runtime_pm_get(rpm);

try_again:
	ret = 0;
	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
			continue;

		if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
			ret = -EBUSY;
			break;
		}

		/*
		 * Requiring the vm destructor to take the object lock
		 * before destroying a vma would help us eliminate the
		 * i915_vm_tryget() here, AND thus also the barrier stuff
		 * at the end. That's an easy fix, but sleeping locks in
		 * a kthread should generally be avoided.
		 */
		ret = -EAGAIN;
		if (!i915_vm_tryget(vma->vm))
			break;

		spin_unlock(&obj->vma.lock);

		/*
		 * Since i915_vma_parked() takes the object lock
		 * before vma destruction, it won't race us here,
		 * and destroy the vma from under us.
		 */

		ret = -EBUSY;
		if (flags & I915_GEM_OBJECT_UNBIND_ASYNC) {
			assert_object_held(vma->obj);
			ret = i915_vma_unbind_async(vma, vm_trylock);
		}

		if (ret == -EBUSY && (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
				      !i915_vma_is_active(vma))) {
			if (vm_trylock) {
				if (mutex_trylock(&vma->vm->mutex)) {
					ret = __i915_vma_unbind(vma);
					mutex_unlock(&vma->vm->mutex);
				}
			} else {
				ret = i915_vma_unbind(vma);
			}
		}

		i915_vm_put(vma->vm);
		spin_lock(&obj->vma.lock);
	}
	list_splice_init(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
		rcu_barrier(); /* flush the i915_vm_release() */
		goto try_again;
	}

	intel_runtime_pm_put(rpm, wakeref);

	return ret;
}

static int
shmem_pread(struct vm_page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

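/*
 * Slow CPU read path: copy the object out of its shmem backing store one
 * page at a time through a kernel mapping, clflushing stale cachelines
 * first when the object requires it.
 */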
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

#ifdef __linux__
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
#else
static inline bool
gtt_user_read(struct drm_i915_private *dev_priv,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}
	return unwritten;
}
#endif

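/*
 * Prepare an object for access through the mappable GGTT aperture: move it
 * to the GTT domain and try to pin it into the aperture. If that is not
 * possible, reserve a single page-sized GGTT node instead, which the
 * pread/pwrite loops below remap to each page of the object in turn.
 */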
static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
					     struct drm_mm_node *node,
					     bool write)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	int ret;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	vma = ERR_PTR(-ENODEV);
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_ww;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_ww;

	if (!i915_gem_object_is_tiled(obj))
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
						  PIN_MAPPABLE |
						  PIN_NONBLOCK /* NOWARN */ |
						  PIN_NOEVICT);
	if (vma == ERR_PTR(-EDEADLK)) {
		ret = -EDEADLK;
		goto err_ww;
	} else if (!IS_ERR(vma)) {
		node->start = i915_ggtt_offset(vma);
		node->flags = 0;
	} else {
		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
		if (ret)
			goto err_ww;
		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = NULL;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret) {
		if (drm_mm_node_allocated(node)) {
			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
			remove_mappable_node(ggtt, node);
		} else {
			i915_vma_unpin(vma);
		}
	}

err_ww:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return ret ? ERR_PTR(ret) : vma;
}

static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
				 struct drm_mm_node *node,
				 struct i915_vma *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	i915_gem_object_unpin_pages(obj);
	if (drm_mm_node_allocated(node)) {
		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
		remove_mappable_node(ggtt, node);
	} else {
		i915_vma_unpin(vma);
	}
}

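/*
 * Slow read path through the GGTT aperture, used when the object cannot be
 * read via its shmem backing store (or doing so faulted).
 */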
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	vma = i915_gem_gtt_prepare(obj, &node, false);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}

		if (gtt_user_read(i915, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PREAD is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);
	ret = -ENODEV;
	if (obj->ops->pread)
		ret = obj->ops->pread(obj, args);
	if (ret != -ENODEV)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */
#ifdef __linux__
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
#else
static inline bool
ggtt_write(struct drm_i915_private *dev_priv,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	bus_space_handle_t bsh;
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	agp_map_atomic(dev_priv->agph, base, &bsh);
	vaddr = bus_space_vaddr(dev_priv->bst, bsh);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	agp_unmap_atomic(dev_priv->agph, bsh);
	if (unwritten) {
		agp_map_subregion(dev_priv->agph, base, PAGE_SIZE, &bsh);
		vaddr = bus_space_vaddr(dev_priv->bst, bsh);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		agp_unmap_subregion(dev_priv->agph, bsh, PAGE_SIZE);
	}

	return unwritten;
}
#endif

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret = 0;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref)
			return -EFAULT;
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_gtt_prepare(obj, &node, true);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_rpm;
	}

	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (drm_mm_node_allocated(&node)) {
			/* flush the write before we modify the GGTT */
			intel_gt_flush_ggtt_writes(ggtt->vm.gt);
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & LINUX_PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(i915, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct vm_page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap_va(vaddr);

	return ret ? -EFAULT : 0;
}

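/*
 * Slow CPU write path: copy user data into the shmem backing store one page
 * at a time, clflushing around partial cacheline writes as required.
 */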
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		goto err_unpin;

	i915_gem_object_finish_access(obj);
	i915_gem_object_unlock(obj);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = curcpu()->ci_cflushsz - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct vm_page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

	i915_gem_object_unpin_pages(obj);
	return ret;

err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	i915_gem_object_unlock(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	/* PWRITE is disallowed for all platforms after TGL-LP. This also
	 * covers all platforms with local memory.
	 */
	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915))
		return -EOPNOTSUPP;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    i915_gem_cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(obj, args);
	}

err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

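/*
 * Runtime suspend: revoke the CPU mmaps of any objects with live userspace
 * GTT/LMEM mappings and mark the fence registers as dirty, as the hardware
 * state behind them will not survive the device powering down.
 */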
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &to_gt(i915)->ggtt->userfault_list, userfault_link)
		__i915_gem_object_release_mmap_gtt(obj);

	list_for_each_entry_safe(obj, on,
				 &i915->runtime_pm.lmem_userfault_list, userfault_link)
		i915_gem_object_runtime_pm_release_mmap_offset(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < to_gt(i915)->ggtt->num_fences; i++) {
		struct i915_fence_reg *reg = &to_gt(i915)->ggtt->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static void discard_ggtt_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	spin_lock(&obj->vma.lock);
	if (!RB_EMPTY_NODE(&vma->obj_node)) {
		rb_erase(&vma->obj_node, &obj->vma.tree);
		RB_CLEAR_NODE(&vma->obj_node);
	}
	spin_unlock(&obj->vma.lock);
}

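/*
 * Pin @obj into the global GTT under the ww context @ww with the requested
 * view, size, alignment and PIN_* flags. Misplaced vmas are unbound (or
 * discarded and recreated if still busy) before pinning, and a stale fence
 * on an untiled object is revoked once the pin succeeds.
 */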
struct i915_vma *
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    const struct i915_gtt_view *view,
			    u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct i915_vma *vma;
	int ret;

	GEM_WARN_ON(!ww);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GTT_VIEW_NORMAL)) {
		/*
		 * If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > ggtt->mappable_end)
			return ERR_PTR(-E2BIG);

		/*
		 * If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > ggtt->mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

new_vma:
	vma = i915_vma_instance(obj, &ggtt->vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			/*
			 * If this misplaced vma is too big (i.e., at least
			 * half the size of the aperture) or hasn't been pinned
			 * mappable before, we ignore the misplacement when
			 * PIN_NONBLOCK is set in order to avoid the ping-pong
			 * issue described above. In other words, we try to
			 * avoid the costly operation of unbinding this vma
			 * from the GGTT and rebinding it back because there
			 * may not be enough space for this vma in the aperture.
			 */
			if (flags & PIN_MAPPABLE &&
			    (vma->fence_size > ggtt->mappable_end / 2 ||
			     !i915_vma_is_map_and_fenceable(vma)))
				return ERR_PTR(-ENOSPC);
		}

		if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
			discard_ggtt_vma(vma);
			goto new_vma;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);

	if (ret)
		return ERR_PTR(ret);

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		mutex_lock(&ggtt->vm.mutex);
		i915_vma_revoke_fence(vma);
		mutex_unlock(&ggtt->vm.mutex);
	}

	ret = i915_vma_wait_for_bind(vma);
	if (ret) {
		i915_vma_unpin(vma);
		return ERR_PTR(ret);
	}

	return vma;
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_gtt_view *view,
			 u64 size, u64 alignment, u64 flags)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *ret;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		ret = i915_gem_object_ggtt_pin_ww(obj, &ww, view, size,
						  alignment, flags);
		if (IS_ERR(ret))
			err = PTR_ERR(ret);
	}

	return err ? ERR_PTR(err) : ret;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_clear_tiling_quirk(obj);
			i915_gem_object_make_shrinkable(obj);
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
			i915_gem_object_make_unshrinkable(obj);
			i915_gem_object_set_tiling_quirk(obj);
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED) {
		obj->mm.madv = args->madv;
		if (obj->ops->adjust_lru)
			obj->ops->adjust_lru(obj);
	}

	if (i915_gem_object_has_pages(obj) ||
	    i915_gem_object_has_self_managed_shrink_list(obj)) {
		unsigned long flags;

		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		if (!list_empty(&obj->mm.link)) {
			struct list_head *list;

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

		}
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;

	i915_gem_object_unlock(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/*
 * A single pass should suffice to release all the freed objects (along most
 * call paths), but be a little more paranoid in that freeing the objects does
 * take a little amount of time, during which the rcu callbacks could have added
 * new objects into the freed list, and armed the work again.
 */
void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	while (atomic_read(&i915->mm.free_count)) {
		flush_work(&i915->mm.free_work);
		flush_delayed_work(&i915->bdev.wq);
		rcu_barrier();
	}
}

/*
 * Similar to objects above (see i915_gem_drain_freed_objects()), in general we
 * have workers that are armed by RCU and then rearm themselves in their
 * callbacks. To be paranoid, we need to drain the workqueue a second time after
 * waiting for the RCU grace period so that we catch work queued via RCU from
 * the first pass. As neither drain_workqueue() nor flush_workqueue() report a
 * result, we make an assumption that we don't require more than 3 passes
 * to catch all _recursive_ RCU delayed work.
 */
void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < 3; i++) {
		flush_workqueue(i915->wq);
		rcu_barrier();
		i915_gem_drain_freed_objects(i915);
	}

	drain_workqueue(i915->wq);
}

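/*
 * One-time GEM initialisation at driver load: set up userptr support, fetch
 * uC firmware, initialise the global GTT and bring up each GT. Failures are
 * unwound, except for -EIO which marks the GPU as wedged while keeping KMS
 * usable.
 */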
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;
	int ret;

	/* We need to fall back to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	intel_uc_fetch_firmwares(&to_gt(dev_priv)->uc);
	intel_wopcm_init(&dev_priv->wopcm);

	ret = i915_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init(gt);
		if (ret)
			goto err_unlock;
	}

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO) {
		for_each_gt(gt, dev_priv, i) {
			intel_gt_driver_remove(gt);
			intel_gt_driver_release(gt);
			intel_uc_cleanup_firmwares(&gt->uc);
		}
	}

	if (ret == -EIO) {
		/*
		 * Allow engines or uC initialisation to fail by marking the GPU
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		for_each_gt(gt, dev_priv, i) {
			if (!intel_gt_is_wedged(gt)) {
				i915_probe_error(dev_priv,
						 "Failed to initialize GPU, declaring it wedged!\n");
				intel_gt_set_wedged(gt);
			}
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_ggtt_resume(to_gt(dev_priv)->ggtt);
		intel_init_clock_gating(dev_priv);
	}

	i915_gem_drain_freed_objects(dev_priv);

	return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
	i915_gem_driver_register__shrinker(i915);

	intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	i915_gem_suspend_late(dev_priv);
	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_remove(gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i) {
		intel_gt_driver_release(gt);
		intel_uc_cleanup_firmwares(&gt->uc);
	}

	/* Flush any outstanding work, including i915_gem_context.release_work. */
	i915_gem_drain_workqueue(dev_priv);

	drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	mtx_init(&i915->mm.obj_lock, IPL_TTY);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	i915_gem_init__mm(dev_priv);
	i915_gem_init__contexts(dev_priv);

	mtx_init(&dev_priv->display.fb_tracking.lock, IPL_NONE);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	struct i915_drm_client *client;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		goto err_alloc;

	client = i915_drm_client_add(&i915->clients);
	if (IS_ERR(client)) {
		ret = PTR_ERR(client);
		goto err_client;
	}

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;
	file_priv->client = client;

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		goto err_context;

	return 0;

err_context:
	i915_drm_client_put(client);
err_client:
	kfree(file_priv);
err_alloc:
	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c" 1386 #endif 1387