1 /* 2 * Copyright © 2008-2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include <drm/drm_vma_manager.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_gem_clflush.h" 33 #include "i915_vgpu.h" 34 #include "i915_trace.h" 35 #include "intel_drv.h" 36 #include "intel_frontbuffer.h" 37 #include "intel_mocs.h" 38 #include "i915_gemfs.h" 39 #include <linux/dma-fence-array.h> 40 #include <linux/kthread.h> 41 #include <linux/reservation.h> 42 #include <linux/shmem_fs.h> 43 #include <linux/slab.h> 44 #include <linux/stop_machine.h> 45 #include <linux/swap.h> 46 #include <linux/pci.h> 47 #include <linux/dma-buf.h> 48 #include <linux/swiotlb.h> 49 50 #include <sys/mman.h> 51 #include <vm/vm_map.h> 52 #include <vm/vm_param.h> 53 54 #undef USE_INSERT 55 56 static void i915_gem_flush_free_objects(struct drm_i915_private *i915); 57 58 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 59 { 60 if (obj->cache_dirty) 61 return false; 62 63 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) 64 return true; 65 66 return obj->pin_global; /* currently in use by HW, keep flushed */ 67 } 68 69 static int 70 insert_mappable_node(struct i915_ggtt *ggtt, 71 struct drm_mm_node *node, u32 size) 72 { 73 memset(node, 0, sizeof(*node)); 74 return drm_mm_insert_node_in_range(&ggtt->base.mm, node, 75 size, 0, I915_COLOR_UNEVICTABLE, 76 0, ggtt->mappable_end, 77 DRM_MM_INSERT_LOW); 78 } 79 80 static void 81 remove_mappable_node(struct drm_mm_node *node) 82 { 83 drm_mm_remove_node(node); 84 } 85 86 /* some bookkeeping */ 87 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 88 u64 size) 89 { 90 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 91 dev_priv->mm.object_count++; 92 dev_priv->mm.object_memory += size; 93 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 94 } 95 96 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, 97 u64 size) 98 { 99 lockmgr(&dev_priv->mm.object_stat_lock, LK_EXCLUSIVE); 100 dev_priv->mm.object_count--; 101 dev_priv->mm.object_memory -= size; 102 lockmgr(&dev_priv->mm.object_stat_lock, LK_RELEASE); 103 } 104 105 static int 106 i915_gem_wait_for_error(struct i915_gpu_error *error) 107 { 108 int ret; 109 110 might_sleep(); 111 112 /* 113 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 
114 * userspace. If it takes that long something really bad is going on and 115 * we should simply try to bail out and fail as gracefully as possible. 116 */ 117 ret = wait_event_interruptible_timeout(error->reset_queue, 118 !i915_reset_backoff(error), 119 I915_RESET_TIMEOUT); 120 if (ret == 0) { 121 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 122 return -EIO; 123 } else if (ret < 0) { 124 return ret; 125 } else { 126 return 0; 127 } 128 } 129 130 int i915_mutex_lock_interruptible(struct drm_device *dev) 131 { 132 struct drm_i915_private *dev_priv = to_i915(dev); 133 int ret; 134 135 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 136 if (ret) 137 return ret; 138 139 ret = mutex_lock_interruptible(&dev->struct_mutex); 140 if (ret) 141 return ret; 142 143 return 0; 144 } 145 146 int 147 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 148 struct drm_file *file) 149 { 150 struct drm_i915_private *dev_priv = to_i915(dev); 151 struct i915_ggtt *ggtt = &dev_priv->ggtt; 152 struct drm_i915_gem_get_aperture *args = data; 153 struct i915_vma *vma; 154 u64 pinned; 155 156 pinned = ggtt->base.reserved; 157 mutex_lock(&dev->struct_mutex); 158 list_for_each_entry(vma, &ggtt->base.active_list, vm_link) 159 if (i915_vma_is_pinned(vma)) 160 pinned += vma->node.size; 161 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) 162 if (i915_vma_is_pinned(vma)) 163 pinned += vma->node.size; 164 mutex_unlock(&dev->struct_mutex); 165 166 args->aper_size = ggtt->base.total; 167 args->aper_available_size = args->aper_size - pinned; 168 169 return 0; 170 } 171 172 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) 173 { 174 #if 0 175 struct address_space *mapping = obj->base.filp->f_mapping; 176 #else 177 vm_object_t vm_obj = obj->base.filp; 178 #endif 179 drm_dma_handle_t *phys; 180 struct sg_table *st; 181 struct scatterlist *sg; 182 char *vaddr; 183 int i; 184 int err; 185 186 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) 187 return -EINVAL; 188 189 /* Always aligning to the object size, allows a single allocation 190 * to handle all possible callers, and given typical object sizes, 191 * the alignment of the buddy allocation will naturally match. 
192 */ 193 phys = drm_pci_alloc(obj->base.dev, 194 roundup_pow_of_two(obj->base.size), 195 roundup_pow_of_two(obj->base.size)); 196 if (!phys) 197 return -ENOMEM; 198 199 vaddr = phys->vaddr; 200 VM_OBJECT_LOCK(vm_obj); 201 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 202 struct page *page; 203 char *src; 204 205 #if 0 206 page = shmem_read_mapping_page(mapping, i); 207 #else 208 page = shmem_read_mapping_page(vm_obj, i); 209 #endif 210 if (IS_ERR(page)) { 211 err = PTR_ERR(page); 212 goto err_phys; 213 } 214 215 src = kmap_atomic(page); 216 memcpy(vaddr, src, PAGE_SIZE); 217 drm_clflush_virt_range(vaddr, PAGE_SIZE); 218 kunmap_atomic(src); 219 220 put_page(page); 221 vaddr += PAGE_SIZE; 222 } 223 VM_OBJECT_UNLOCK(vm_obj); 224 225 i915_gem_chipset_flush(to_i915(obj->base.dev)); 226 227 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 228 if (!st) { 229 err = -ENOMEM; 230 goto err_phys; 231 } 232 233 if (sg_alloc_table(st, 1, GFP_KERNEL)) { 234 kfree(st); 235 err = -ENOMEM; 236 goto err_phys; 237 } 238 239 sg = st->sgl; 240 sg->offset = 0; 241 sg->length = obj->base.size; 242 243 sg_dma_address(sg) = phys->busaddr; 244 sg_dma_len(sg) = obj->base.size; 245 246 obj->phys_handle = phys; 247 248 __i915_gem_object_set_pages(obj, st, sg->length); 249 250 return 0; 251 252 err_phys: 253 drm_pci_free(obj->base.dev, phys); 254 255 return err; 256 } 257 258 static void __start_cpu_write(struct drm_i915_gem_object *obj) 259 { 260 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 261 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 262 if (cpu_write_needs_clflush(obj)) 263 obj->cache_dirty = true; 264 } 265 266 static void 267 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, 268 struct sg_table *pages, 269 bool needs_clflush) 270 { 271 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); 272 273 if (obj->mm.madv == I915_MADV_DONTNEED) 274 obj->mm.dirty = false; 275 276 if (needs_clflush && 277 (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 278 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) 279 drm_clflush_sg(pages); 280 281 __start_cpu_write(obj); 282 } 283 284 static void 285 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, 286 struct sg_table *pages) 287 { 288 __i915_gem_object_release_shmem(obj, pages, false); 289 290 if (obj->mm.dirty) { 291 #if 0 292 struct address_space *mapping = obj->base.filp->f_mapping; 293 #else 294 vm_object_t vm_obj = obj->base.filp; 295 #endif 296 char *vaddr = obj->phys_handle->vaddr; 297 int i; 298 299 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { 300 struct page *page; 301 char *dst; 302 303 page = shmem_read_mapping_page(vm_obj, i); 304 if (IS_ERR(page)) 305 continue; 306 307 dst = kmap_atomic(page); 308 drm_clflush_virt_range(vaddr, PAGE_SIZE); 309 memcpy(dst, vaddr, PAGE_SIZE); 310 kunmap_atomic(dst); 311 312 set_page_dirty(page); 313 if (obj->mm.madv == I915_MADV_WILLNEED) 314 mark_page_accessed(page); 315 put_page(page); 316 vaddr += PAGE_SIZE; 317 } 318 obj->mm.dirty = false; 319 } 320 321 sg_free_table(pages); 322 kfree(pages); 323 324 drm_pci_free(obj->base.dev, obj->phys_handle); 325 } 326 327 static void 328 i915_gem_object_release_phys(struct drm_i915_gem_object *obj) 329 { 330 i915_gem_object_unpin_pages(obj); 331 } 332 333 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { 334 .get_pages = i915_gem_object_get_pages_phys, 335 .put_pages = i915_gem_object_put_pages_phys, 336 .release = i915_gem_object_release_phys, 337 }; 338 339 static const struct drm_i915_gem_object_ops i915_gem_object_ops; 340 
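
/*
 * Illustrative sketch (not part of the driver): the phys backing store set up
 * above is described to the rest of GEM as a scatter/gather table containing a
 * single entry that spans the whole contiguous DMA allocation. A minimal
 * helper doing only that construction could look like the compiled-out code
 * below; the function name is hypothetical and exists purely as an example.
 */
#if 0
static struct sg_table *
i915_sg_from_contiguous_example(dma_addr_t busaddr, u64 size)
{
	struct sg_table *st;
	struct scatterlist *sg;

	st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	/* One scatterlist entry is enough for a physically contiguous range. */
	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;
	sg_dma_address(sg) = busaddr;
	sg_dma_len(sg) = size;

	return st;
}
#endif
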
341 int i915_gem_object_unbind(struct drm_i915_gem_object *obj) 342 { 343 struct i915_vma *vma; 344 LINUX_LIST_HEAD(still_in_list); 345 int ret; 346 347 lockdep_assert_held(&obj->base.dev->struct_mutex); 348 349 /* Closed vma are removed from the obj->vma_list - but they may 350 * still have an active binding on the object. To remove those we 351 * must wait for all rendering to complete to the object (as unbinding 352 * must anyway), and retire the requests. 353 */ 354 ret = i915_gem_object_set_to_cpu_domain(obj, false); 355 if (ret) 356 return ret; 357 358 while ((vma = list_first_entry_or_null(&obj->vma_list, 359 struct i915_vma, 360 obj_link))) { 361 list_move_tail(&vma->obj_link, &still_in_list); 362 ret = i915_vma_unbind(vma); 363 if (ret) 364 break; 365 } 366 list_splice(&still_in_list, &obj->vma_list); 367 368 return ret; 369 } 370 371 static long 372 i915_gem_object_wait_fence(struct dma_fence *fence, 373 unsigned int flags, 374 long timeout, 375 struct intel_rps_client *rps_client) 376 { 377 struct drm_i915_gem_request *rq; 378 379 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1); 380 381 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 382 return timeout; 383 384 if (!dma_fence_is_i915(fence)) 385 return dma_fence_wait_timeout(fence, 386 flags & I915_WAIT_INTERRUPTIBLE, 387 timeout); 388 389 rq = to_request(fence); 390 if (i915_gem_request_completed(rq)) 391 goto out; 392 393 /* This client is about to stall waiting for the GPU. In many cases 394 * this is undesirable and limits the throughput of the system, as 395 * many clients cannot continue processing user input/output whilst 396 * blocked. RPS autotuning may take tens of milliseconds to respond 397 * to the GPU load and thus incurs additional latency for the client. 398 * We can circumvent that by promoting the GPU frequency to maximum 399 * before we wait. This makes the GPU throttle up much more quickly 400 * (good for benchmarks and user experience, e.g. window animations), 401 * but at a cost of spending more power processing the workload 402 * (bad for battery). Not all clients even want their results 403 * immediately and for them we should just let the GPU select its own 404 * frequency to maximise efficiency. To prevent a single client from 405 * forcing the clocks too high for the whole system, we only allow 406 * each client to waitboost once in a busy period. 
407 */ 408 if (rps_client) { 409 if (INTEL_GEN(rq->i915) >= 6) 410 gen6_rps_boost(rq, rps_client); 411 else 412 rps_client = NULL; 413 } 414 415 timeout = i915_wait_request(rq, flags, timeout); 416 417 out: 418 if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq)) 419 i915_gem_request_retire_upto(rq); 420 421 return timeout; 422 } 423 424 static long 425 i915_gem_object_wait_reservation(struct reservation_object *resv, 426 unsigned int flags, 427 long timeout, 428 struct intel_rps_client *rps_client) 429 { 430 unsigned int seq = __read_seqcount_begin(&resv->seq); 431 struct dma_fence *excl; 432 bool prune_fences = false; 433 434 if (flags & I915_WAIT_ALL) { 435 struct dma_fence **shared; 436 unsigned int count, i; 437 int ret; 438 439 ret = reservation_object_get_fences_rcu(resv, 440 &excl, &count, &shared); 441 if (ret) 442 return ret; 443 444 for (i = 0; i < count; i++) { 445 timeout = i915_gem_object_wait_fence(shared[i], 446 flags, timeout, 447 rps_client); 448 if (timeout < 0) 449 break; 450 451 dma_fence_put(shared[i]); 452 } 453 454 for (; i < count; i++) 455 dma_fence_put(shared[i]); 456 kfree(shared); 457 458 prune_fences = count && timeout >= 0; 459 } else { 460 excl = reservation_object_get_excl_rcu(resv); 461 } 462 463 if (excl && timeout >= 0) { 464 timeout = i915_gem_object_wait_fence(excl, flags, timeout, 465 rps_client); 466 prune_fences = timeout >= 0; 467 } 468 469 dma_fence_put(excl); 470 471 /* Oportunistically prune the fences iff we know they have *all* been 472 * signaled and that the reservation object has not been changed (i.e. 473 * no new fences have been added). 474 */ 475 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) { 476 if (reservation_object_trylock(resv)) { 477 if (!__read_seqcount_retry(&resv->seq, seq)) 478 reservation_object_add_excl_fence(resv, NULL); 479 reservation_object_unlock(resv); 480 } 481 } 482 483 return timeout; 484 } 485 486 static void __fence_set_priority(struct dma_fence *fence, int prio) 487 { 488 struct drm_i915_gem_request *rq; 489 struct intel_engine_cs *engine; 490 491 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) 492 return; 493 494 rq = to_request(fence); 495 engine = rq->engine; 496 if (!engine->schedule) 497 return; 498 499 engine->schedule(rq, prio); 500 } 501 502 static void fence_set_priority(struct dma_fence *fence, int prio) 503 { 504 /* Recurse once into a fence-array */ 505 if (dma_fence_is_array(fence)) { 506 struct dma_fence_array *array = to_dma_fence_array(fence); 507 int i; 508 509 for (i = 0; i < array->num_fences; i++) 510 __fence_set_priority(array->fences[i], prio); 511 } else { 512 __fence_set_priority(fence, prio); 513 } 514 } 515 516 int 517 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, 518 unsigned int flags, 519 int prio) 520 { 521 struct dma_fence *excl; 522 523 if (flags & I915_WAIT_ALL) { 524 struct dma_fence **shared; 525 unsigned int count, i; 526 int ret; 527 528 ret = reservation_object_get_fences_rcu(obj->resv, 529 &excl, &count, &shared); 530 if (ret) 531 return ret; 532 533 for (i = 0; i < count; i++) { 534 fence_set_priority(shared[i], prio); 535 dma_fence_put(shared[i]); 536 } 537 538 kfree(shared); 539 } else { 540 excl = reservation_object_get_excl_rcu(obj->resv); 541 } 542 543 if (excl) { 544 fence_set_priority(excl, prio); 545 dma_fence_put(excl); 546 } 547 return 0; 548 } 549 550 /** 551 * Waits for rendering to the object to be completed 552 * @obj: i915 gem object 553 * @flags: how to wait (under a lock, for all rendering or just 
for writes etc) 554 * @timeout: how long to wait 555 * @rps: client (user process) to charge for any waitboosting 556 */ 557 int 558 i915_gem_object_wait(struct drm_i915_gem_object *obj, 559 unsigned int flags, 560 long timeout, 561 struct intel_rps_client *rps_client) 562 { 563 might_sleep(); 564 #if IS_ENABLED(CONFIG_LOCKDEP) 565 GEM_BUG_ON(debug_locks && 566 !!lockdep_is_held(&obj->base.dev->struct_mutex) != 567 !!(flags & I915_WAIT_LOCKED)); 568 #endif 569 GEM_BUG_ON(timeout < 0); 570 571 timeout = i915_gem_object_wait_reservation(obj->resv, 572 flags, timeout, 573 rps_client); 574 return timeout < 0 ? timeout : 0; 575 } 576 577 static struct intel_rps_client *to_rps_client(struct drm_file *file) 578 { 579 struct drm_i915_file_private *fpriv = file->driver_priv; 580 581 return &fpriv->rps_client; 582 } 583 584 static int 585 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, 586 struct drm_i915_gem_pwrite *args, 587 struct drm_file *file) 588 { 589 void *vaddr = obj->phys_handle->vaddr + args->offset; 590 char __user *user_data = u64_to_user_ptr(args->data_ptr); 591 592 /* We manually control the domain here and pretend that it 593 * remains coherent i.e. in the GTT domain, like shmem_pwrite. 594 */ 595 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 596 if (copy_from_user(vaddr, user_data, args->size)) 597 return -EFAULT; 598 599 drm_clflush_virt_range(vaddr, args->size); 600 i915_gem_chipset_flush(to_i915(obj->base.dev)); 601 602 intel_fb_obj_flush(obj, ORIGIN_CPU); 603 return 0; 604 } 605 606 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) 607 { 608 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); 609 } 610 611 void i915_gem_object_free(struct drm_i915_gem_object *obj) 612 { 613 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 614 kmem_cache_free(dev_priv->objects, obj); 615 } 616 617 static int 618 i915_gem_create(struct drm_file *file, 619 struct drm_i915_private *dev_priv, 620 uint64_t size, 621 uint32_t *handle_p) 622 { 623 struct drm_i915_gem_object *obj; 624 int ret; 625 u32 handle; 626 627 size = roundup(size, PAGE_SIZE); 628 if (size == 0) 629 return -EINVAL; 630 631 /* Allocate the new object */ 632 obj = i915_gem_object_create(dev_priv, size); 633 if (IS_ERR(obj)) 634 return PTR_ERR(obj); 635 636 ret = drm_gem_handle_create(file, &obj->base, &handle); 637 /* drop reference from allocate - handle holds it now */ 638 i915_gem_object_put(obj); 639 if (ret) 640 return ret; 641 642 *handle_p = handle; 643 return 0; 644 } 645 646 int 647 i915_gem_dumb_create(struct drm_file *file, 648 struct drm_device *dev, 649 struct drm_mode_create_dumb *args) 650 { 651 /* have to work out size/pitch and return them */ 652 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); 653 args->size = args->pitch * args->height; 654 return i915_gem_create(file, to_i915(dev), 655 args->size, &args->handle); 656 } 657 658 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) 659 { 660 return !(obj->cache_level == I915_CACHE_NONE || 661 obj->cache_level == I915_CACHE_WT); 662 } 663 664 /** 665 * Creates a new mm object and returns a handle to it. 
666 * @dev: drm device pointer 667 * @data: ioctl data blob 668 * @file: drm file pointer 669 */ 670 int 671 i915_gem_create_ioctl(struct drm_device *dev, void *data, 672 struct drm_file *file) 673 { 674 struct drm_i915_private *dev_priv = to_i915(dev); 675 struct drm_i915_gem_create *args = data; 676 677 i915_gem_flush_free_objects(dev_priv); 678 679 return i915_gem_create(file, dev_priv, 680 args->size, &args->handle); 681 } 682 683 static inline enum fb_op_origin 684 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) 685 { 686 return (domain == I915_GEM_DOMAIN_GTT ? 687 obj->frontbuffer_ggtt_origin : ORIGIN_CPU); 688 } 689 690 static void 691 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) 692 { 693 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 694 695 if (!(obj->base.write_domain & flush_domains)) 696 return; 697 698 /* No actual flushing is required for the GTT write domain. Writes 699 * to it "immediately" go to main memory as far as we know, so there's 700 * no chipset flush. It also doesn't land in render cache. 701 * 702 * However, we do have to enforce the order so that all writes through 703 * the GTT land before any writes to the device, such as updates to 704 * the GATT itself. 705 * 706 * We also have to wait a bit for the writes to land from the GTT. 707 * An uncached read (i.e. mmio) seems to be ideal for the round-trip 708 * timing. This issue has only been observed when switching quickly 709 * between GTT writes and CPU reads from inside the kernel on recent hw, 710 * and it appears to only affect discrete GTT blocks (i.e. on LLC 711 * system agents we cannot reproduce this behaviour). 712 */ 713 wmb(); 714 715 switch (obj->base.write_domain) { 716 case I915_GEM_DOMAIN_GTT: 717 if (!HAS_LLC(dev_priv)) { 718 intel_runtime_pm_get(dev_priv); 719 spin_lock_irq(&dev_priv->uncore.lock); 720 POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base)); 721 spin_unlock_irq(&dev_priv->uncore.lock); 722 intel_runtime_pm_put(dev_priv); 723 } 724 725 intel_fb_obj_flush(obj, 726 fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); 727 break; 728 729 case I915_GEM_DOMAIN_CPU: 730 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 731 break; 732 733 case I915_GEM_DOMAIN_RENDER: 734 if (gpu_write_needs_clflush(obj)) 735 obj->cache_dirty = true; 736 break; 737 } 738 739 obj->base.write_domain = 0; 740 } 741 742 static inline int 743 __copy_to_user_swizzled(char __user *cpu_vaddr, 744 const char *gpu_vaddr, int gpu_offset, 745 int length) 746 { 747 int ret, cpu_offset = 0; 748 749 while (length > 0) { 750 int cacheline_end = ALIGN(gpu_offset + 1, 64); 751 int this_length = min(cacheline_end - gpu_offset, length); 752 int swizzled_gpu_offset = gpu_offset ^ 64; 753 754 ret = __copy_to_user(cpu_vaddr + cpu_offset, 755 gpu_vaddr + swizzled_gpu_offset, 756 this_length); 757 if (ret) 758 return ret + length; 759 760 cpu_offset += this_length; 761 gpu_offset += this_length; 762 length -= this_length; 763 } 764 765 return 0; 766 } 767 768 static inline int 769 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset, 770 const char __user *cpu_vaddr, 771 int length) 772 { 773 int ret, cpu_offset = 0; 774 775 while (length > 0) { 776 int cacheline_end = ALIGN(gpu_offset + 1, 64); 777 int this_length = min(cacheline_end - gpu_offset, length); 778 int swizzled_gpu_offset = gpu_offset ^ 64; 779 780 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset, 781 cpu_vaddr + cpu_offset, 782 this_length); 783 if (ret) 784 return ret + length; 785 786 
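		/*
		 * Advance past the chunk just copied. The chunk size was
		 * clamped to the enclosing 64-byte cacheline above, so the
		 * "^ 64" swizzle always swaps whole cachelines and a single
		 * copy never straddles a swizzle boundary.
		 */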
cpu_offset += this_length; 787 gpu_offset += this_length; 788 length -= this_length; 789 } 790 791 return 0; 792 } 793 794 /* 795 * Pins the specified object's pages and synchronizes the object with 796 * GPU accesses. Sets needs_clflush to non-zero if the caller should 797 * flush the object from the CPU cache. 798 */ 799 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 800 unsigned int *needs_clflush) 801 { 802 int ret; 803 804 lockdep_assert_held(&obj->base.dev->struct_mutex); 805 806 *needs_clflush = 0; 807 if (!i915_gem_object_has_struct_page(obj)) 808 return -ENODEV; 809 810 ret = i915_gem_object_wait(obj, 811 I915_WAIT_INTERRUPTIBLE | 812 I915_WAIT_LOCKED, 813 MAX_SCHEDULE_TIMEOUT, 814 NULL); 815 if (ret) 816 return ret; 817 818 ret = i915_gem_object_pin_pages(obj); 819 if (ret) 820 return ret; 821 822 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ || 823 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 824 ret = i915_gem_object_set_to_cpu_domain(obj, false); 825 if (ret) 826 goto err_unpin; 827 else 828 goto out; 829 } 830 831 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 832 833 /* If we're not in the cpu read domain, set ourself into the gtt 834 * read domain and manually flush cachelines (if required). This 835 * optimizes for the case when the gpu will dirty the data 836 * anyway again before the next pread happens. 837 */ 838 if (!obj->cache_dirty && 839 !(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 840 *needs_clflush = CLFLUSH_BEFORE; 841 842 out: 843 /* return with the pages pinned */ 844 return 0; 845 846 err_unpin: 847 i915_gem_object_unpin_pages(obj); 848 return ret; 849 } 850 851 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, 852 unsigned int *needs_clflush) 853 { 854 int ret; 855 856 lockdep_assert_held(&obj->base.dev->struct_mutex); 857 858 *needs_clflush = 0; 859 if (!i915_gem_object_has_struct_page(obj)) 860 return -ENODEV; 861 862 ret = i915_gem_object_wait(obj, 863 I915_WAIT_INTERRUPTIBLE | 864 I915_WAIT_LOCKED | 865 I915_WAIT_ALL, 866 MAX_SCHEDULE_TIMEOUT, 867 NULL); 868 if (ret) 869 return ret; 870 871 ret = i915_gem_object_pin_pages(obj); 872 if (ret) 873 return ret; 874 875 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE || 876 !static_cpu_has(X86_FEATURE_CLFLUSH)) { 877 ret = i915_gem_object_set_to_cpu_domain(obj, true); 878 if (ret) 879 goto err_unpin; 880 else 881 goto out; 882 } 883 884 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 885 886 /* If we're not in the cpu write domain, set ourself into the 887 * gtt write domain and manually flush cachelines (as required). 888 * This optimizes for the case when the gpu will use the data 889 * right away and we therefore have to clflush anyway. 890 */ 891 if (!obj->cache_dirty) { 892 *needs_clflush |= CLFLUSH_AFTER; 893 894 /* 895 * Same trick applies to invalidate partially written 896 * cachelines read before writing. 
897 */ 898 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 899 *needs_clflush |= CLFLUSH_BEFORE; 900 } 901 902 out: 903 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 904 obj->mm.dirty = true; 905 /* return with the pages pinned */ 906 return 0; 907 908 err_unpin: 909 i915_gem_object_unpin_pages(obj); 910 return ret; 911 } 912 913 static void 914 shmem_clflush_swizzled_range(char *addr, unsigned long length, 915 bool swizzled) 916 { 917 if (unlikely(swizzled)) { 918 unsigned long start = (unsigned long) addr; 919 unsigned long end = (unsigned long) addr + length; 920 921 /* For swizzling simply ensure that we always flush both 922 * channels. Lame, but simple and it works. Swizzled 923 * pwrite/pread is far from a hotpath - current userspace 924 * doesn't use it at all. */ 925 start = round_down(start, 128); 926 end = round_up(end, 128); 927 928 drm_clflush_virt_range((void *)start, end - start); 929 } else { 930 drm_clflush_virt_range(addr, length); 931 } 932 933 } 934 935 /* Only difference to the fast-path function is that this can handle bit17 936 * and uses non-atomic copy and kmap functions. */ 937 static int 938 shmem_pread_slow(struct page *page, int offset, int length, 939 char __user *user_data, 940 bool page_do_bit17_swizzling, bool needs_clflush) 941 { 942 char *vaddr; 943 int ret; 944 945 vaddr = kmap(page); 946 if (needs_clflush) 947 shmem_clflush_swizzled_range(vaddr + offset, length, 948 page_do_bit17_swizzling); 949 950 if (page_do_bit17_swizzling) 951 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length); 952 else 953 ret = __copy_to_user(user_data, vaddr + offset, length); 954 kunmap(page); 955 956 return ret ? - EFAULT : 0; 957 } 958 959 static int 960 shmem_pread(struct page *page, int offset, int length, char __user *user_data, 961 bool page_do_bit17_swizzling, bool needs_clflush) 962 { 963 int ret; 964 965 ret = -ENODEV; 966 if (!page_do_bit17_swizzling) { 967 char *vaddr = kmap_atomic(page); 968 969 if (needs_clflush) 970 drm_clflush_virt_range(vaddr + offset, length); 971 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length); 972 kunmap_atomic(vaddr); 973 } 974 if (ret == 0) 975 return 0; 976 977 return shmem_pread_slow(page, offset, length, user_data, 978 page_do_bit17_swizzling, needs_clflush); 979 } 980 981 static int 982 i915_gem_shmem_pread(struct drm_i915_gem_object *obj, 983 struct drm_i915_gem_pread *args) 984 { 985 char __user *user_data; 986 u64 remain; 987 unsigned int obj_do_bit17_swizzling; 988 unsigned int needs_clflush; 989 unsigned int idx, offset; 990 int ret; 991 992 obj_do_bit17_swizzling = 0; 993 if (i915_gem_object_needs_bit17_swizzle(obj)) 994 obj_do_bit17_swizzling = BIT(17); 995 996 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex); 997 if (ret) 998 return ret; 999 1000 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush); 1001 mutex_unlock(&obj->base.dev->struct_mutex); 1002 if (ret) 1003 return ret; 1004 1005 remain = args->size; 1006 user_data = u64_to_user_ptr(args->data_ptr); 1007 offset = offset_in_page(args->offset); 1008 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1009 struct page *page = i915_gem_object_get_page(obj, idx); 1010 int length; 1011 1012 length = remain; 1013 if (offset + length > PAGE_SIZE) 1014 length = PAGE_SIZE - offset; 1015 1016 ret = shmem_pread(page, offset, length, user_data, 1017 page_to_phys(page) & obj_do_bit17_swizzling, 1018 needs_clflush); 1019 if (ret) 1020 break; 1021 1022 remain -= length; 1023 user_data += length; 1024 offset = 0; 1025 } 1026 1027 
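	/*
	 * Release the pages pinned by i915_gem_obj_prepare_shmem_read(),
	 * whether or not the copy above succeeded.
	 */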
i915_gem_obj_finish_shmem_access(obj); 1028 return ret; 1029 } 1030 1031 static inline bool 1032 gtt_user_read(struct io_mapping *mapping, 1033 loff_t base, int offset, 1034 char __user *user_data, int length) 1035 { 1036 void __iomem *vaddr; 1037 unsigned long unwritten; 1038 1039 /* We can use the cpu mem copy function because this is X86. */ 1040 vaddr = io_mapping_map_atomic_wc(mapping, base); 1041 unwritten = __copy_to_user_inatomic(user_data, 1042 (void __force *)vaddr + offset, 1043 length); 1044 io_mapping_unmap_atomic(vaddr); 1045 if (unwritten) { 1046 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); 1047 unwritten = copy_to_user(user_data, 1048 (void __force *)vaddr + offset, 1049 length); 1050 io_mapping_unmap(vaddr); 1051 } 1052 return unwritten; 1053 } 1054 1055 static int 1056 i915_gem_gtt_pread(struct drm_i915_gem_object *obj, 1057 const struct drm_i915_gem_pread *args) 1058 { 1059 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1060 struct i915_ggtt *ggtt = &i915->ggtt; 1061 struct drm_mm_node node; 1062 struct i915_vma *vma; 1063 void __user *user_data; 1064 u64 remain, offset; 1065 int ret; 1066 1067 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1068 if (ret) 1069 return ret; 1070 1071 intel_runtime_pm_get(i915); 1072 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1073 PIN_MAPPABLE | 1074 PIN_NONFAULT | 1075 PIN_NONBLOCK); 1076 if (!IS_ERR(vma)) { 1077 node.start = i915_ggtt_offset(vma); 1078 node.allocated = false; 1079 ret = i915_vma_put_fence(vma); 1080 if (ret) { 1081 i915_vma_unpin(vma); 1082 vma = ERR_PTR(ret); 1083 } 1084 } 1085 if (IS_ERR(vma)) { 1086 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1087 if (ret) 1088 goto out_unlock; 1089 GEM_BUG_ON(!node.allocated); 1090 } 1091 1092 ret = i915_gem_object_set_to_gtt_domain(obj, false); 1093 if (ret) 1094 goto out_unpin; 1095 1096 mutex_unlock(&i915->drm.struct_mutex); 1097 1098 user_data = u64_to_user_ptr(args->data_ptr); 1099 remain = args->size; 1100 offset = args->offset; 1101 1102 while (remain > 0) { 1103 /* Operation in this page 1104 * 1105 * page_base = page offset within aperture 1106 * page_offset = offset within page 1107 * page_length = bytes to copy for this page 1108 */ 1109 u32 page_base = node.start; 1110 unsigned page_offset = offset_in_page(offset); 1111 unsigned page_length = PAGE_SIZE - page_offset; 1112 page_length = remain < page_length ? remain : page_length; 1113 if (node.allocated) { 1114 wmb(); 1115 ggtt->base.insert_page(&ggtt->base, 1116 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1117 node.start, I915_CACHE_NONE, 0); 1118 wmb(); 1119 } else { 1120 page_base += offset & LINUX_PAGE_MASK; 1121 } 1122 1123 if (gtt_user_read(&ggtt->mappable, page_base, page_offset, 1124 user_data, page_length)) { 1125 ret = -EFAULT; 1126 break; 1127 } 1128 1129 remain -= page_length; 1130 user_data += page_length; 1131 offset += page_length; 1132 } 1133 1134 mutex_lock(&i915->drm.struct_mutex); 1135 out_unpin: 1136 if (node.allocated) { 1137 wmb(); 1138 ggtt->base.clear_range(&ggtt->base, 1139 node.start, node.size); 1140 remove_mappable_node(&node); 1141 } else { 1142 i915_vma_unpin(vma); 1143 } 1144 out_unlock: 1145 intel_runtime_pm_put(i915); 1146 mutex_unlock(&i915->drm.struct_mutex); 1147 1148 return ret; 1149 } 1150 1151 /** 1152 * Reads data from the object referenced by handle. 1153 * @dev: drm device pointer 1154 * @data: ioctl data blob 1155 * @file: drm file pointer 1156 * 1157 * On error, the contents of *data are undefined. 
1158 */ 1159 int 1160 i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1161 struct drm_file *file) 1162 { 1163 struct drm_i915_gem_pread *args = data; 1164 struct drm_i915_gem_object *obj; 1165 int ret; 1166 1167 if (args->size == 0) 1168 return 0; 1169 1170 #if 0 1171 if (!access_ok(VERIFY_WRITE, 1172 u64_to_user_ptr(args->data_ptr), 1173 args->size)) 1174 return -EFAULT; 1175 #endif 1176 1177 obj = i915_gem_object_lookup(file, args->handle); 1178 if (!obj) 1179 return -ENOENT; 1180 1181 /* Bounds check source. */ 1182 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1183 ret = -EINVAL; 1184 goto out; 1185 } 1186 1187 trace_i915_gem_object_pread(obj, args->offset, args->size); 1188 1189 ret = i915_gem_object_wait(obj, 1190 I915_WAIT_INTERRUPTIBLE, 1191 MAX_SCHEDULE_TIMEOUT, 1192 to_rps_client(file)); 1193 if (ret) 1194 goto out; 1195 1196 ret = i915_gem_object_pin_pages(obj); 1197 if (ret) 1198 goto out; 1199 1200 ret = i915_gem_shmem_pread(obj, args); 1201 if (ret == -EFAULT || ret == -ENODEV) 1202 ret = i915_gem_gtt_pread(obj, args); 1203 1204 i915_gem_object_unpin_pages(obj); 1205 out: 1206 i915_gem_object_put(obj); 1207 return ret; 1208 } 1209 1210 /* This is the fast write path which cannot handle 1211 * page faults in the source data 1212 */ 1213 1214 static inline bool 1215 ggtt_write(struct io_mapping *mapping, 1216 loff_t base, int offset, 1217 char __user *user_data, int length) 1218 { 1219 void __iomem *vaddr; 1220 unsigned long unwritten; 1221 1222 /* We can use the cpu mem copy function because this is X86. */ 1223 vaddr = io_mapping_map_atomic_wc(mapping, base); 1224 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset, 1225 user_data, length); 1226 io_mapping_unmap_atomic(vaddr); 1227 if (unwritten) { 1228 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); 1229 unwritten = copy_from_user((void __force *)vaddr + offset, 1230 user_data, length); 1231 io_mapping_unmap(vaddr); 1232 } 1233 1234 return unwritten; 1235 } 1236 1237 /** 1238 * This is the fast pwrite path, where we copy the data directly from the 1239 * user into the GTT, uncached. 1240 * @obj: i915 GEM object 1241 * @args: pwrite arguments structure 1242 */ 1243 static int 1244 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, 1245 const struct drm_i915_gem_pwrite *args) 1246 { 1247 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1248 struct i915_ggtt *ggtt = &i915->ggtt; 1249 struct drm_mm_node node; 1250 struct i915_vma *vma; 1251 u64 remain, offset; 1252 void __user *user_data; 1253 int ret; 1254 1255 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1256 if (ret) 1257 return ret; 1258 1259 if (i915_gem_object_has_struct_page(obj)) { 1260 /* 1261 * Avoid waking the device up if we can fallback, as 1262 * waking/resuming is very slow (worst-case 10-100 ms 1263 * depending on PCI sleeps and our own resume time). 1264 * This easily dwarfs any performance advantage from 1265 * using the cache bypass of indirect GGTT access. 
1266 */ 1267 if (!intel_runtime_pm_get_if_in_use(i915)) { 1268 ret = -EFAULT; 1269 goto out_unlock; 1270 } 1271 } else { 1272 /* No backing pages, no fallback, we must force GGTT access */ 1273 intel_runtime_pm_get(i915); 1274 } 1275 1276 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 1277 PIN_MAPPABLE | 1278 PIN_NONFAULT | 1279 PIN_NONBLOCK); 1280 if (!IS_ERR(vma)) { 1281 node.start = i915_ggtt_offset(vma); 1282 node.allocated = false; 1283 ret = i915_vma_put_fence(vma); 1284 if (ret) { 1285 i915_vma_unpin(vma); 1286 vma = ERR_PTR(ret); 1287 } 1288 } 1289 if (IS_ERR(vma)) { 1290 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); 1291 if (ret) 1292 goto out_rpm; 1293 GEM_BUG_ON(!node.allocated); 1294 } 1295 1296 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1297 if (ret) 1298 goto out_unpin; 1299 1300 mutex_unlock(&i915->drm.struct_mutex); 1301 1302 intel_fb_obj_invalidate(obj, ORIGIN_CPU); 1303 1304 user_data = u64_to_user_ptr(args->data_ptr); 1305 offset = args->offset; 1306 remain = args->size; 1307 while (remain) { 1308 /* Operation in this page 1309 * 1310 * page_base = page offset within aperture 1311 * page_offset = offset within page 1312 * page_length = bytes to copy for this page 1313 */ 1314 u32 page_base = node.start; 1315 unsigned int page_offset = offset_in_page(offset); 1316 unsigned int page_length = PAGE_SIZE - page_offset; 1317 page_length = remain < page_length ? remain : page_length; 1318 if (node.allocated) { 1319 wmb(); /* flush the write before we modify the GGTT */ 1320 ggtt->base.insert_page(&ggtt->base, 1321 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), 1322 node.start, I915_CACHE_NONE, 0); 1323 wmb(); /* flush modifications to the GGTT (insert_page) */ 1324 } else { 1325 page_base += offset & LINUX_PAGE_MASK; 1326 } 1327 /* If we get a fault while copying data, then (presumably) our 1328 * source page isn't available. Return the error and we'll 1329 * retry in the slow path. 1330 * If the object is non-shmem backed, we retry again with the 1331 * path that handles page fault. 1332 */ 1333 if (ggtt_write(&ggtt->mappable, page_base, page_offset, 1334 user_data, page_length)) { 1335 ret = -EFAULT; 1336 break; 1337 } 1338 1339 remain -= page_length; 1340 user_data += page_length; 1341 offset += page_length; 1342 } 1343 intel_fb_obj_flush(obj, ORIGIN_CPU); 1344 1345 mutex_lock(&i915->drm.struct_mutex); 1346 out_unpin: 1347 if (node.allocated) { 1348 wmb(); 1349 ggtt->base.clear_range(&ggtt->base, 1350 node.start, node.size); 1351 remove_mappable_node(&node); 1352 } else { 1353 i915_vma_unpin(vma); 1354 } 1355 out_rpm: 1356 intel_runtime_pm_put(i915); 1357 out_unlock: 1358 mutex_unlock(&i915->drm.struct_mutex); 1359 return ret; 1360 } 1361 1362 static int 1363 shmem_pwrite_slow(struct page *page, int offset, int length, 1364 char __user *user_data, 1365 bool page_do_bit17_swizzling, 1366 bool needs_clflush_before, 1367 bool needs_clflush_after) 1368 { 1369 char *vaddr; 1370 int ret; 1371 1372 vaddr = kmap(page); 1373 if (unlikely(needs_clflush_before || page_do_bit17_swizzling)) 1374 shmem_clflush_swizzled_range(vaddr + offset, length, 1375 page_do_bit17_swizzling); 1376 if (page_do_bit17_swizzling) 1377 ret = __copy_from_user_swizzled(vaddr, offset, user_data, 1378 length); 1379 else 1380 ret = __copy_from_user(vaddr + offset, user_data, length); 1381 if (needs_clflush_after) 1382 shmem_clflush_swizzled_range(vaddr + offset, length, 1383 page_do_bit17_swizzling); 1384 kunmap(page); 1385 1386 return ret ? 
-EFAULT : 0; 1387 } 1388 1389 /* Per-page copy function for the shmem pwrite fastpath. 1390 * Flushes invalid cachelines before writing to the target if 1391 * needs_clflush_before is set and flushes out any written cachelines after 1392 * writing if needs_clflush is set. 1393 */ 1394 static int 1395 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data, 1396 bool page_do_bit17_swizzling, 1397 bool needs_clflush_before, 1398 bool needs_clflush_after) 1399 { 1400 int ret; 1401 1402 ret = -ENODEV; 1403 if (!page_do_bit17_swizzling) { 1404 char *vaddr = kmap_atomic(page); 1405 1406 if (needs_clflush_before) 1407 drm_clflush_virt_range(vaddr + offset, len); 1408 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len); 1409 if (needs_clflush_after) 1410 drm_clflush_virt_range(vaddr + offset, len); 1411 1412 kunmap_atomic(vaddr); 1413 } 1414 if (ret == 0) 1415 return ret; 1416 1417 return shmem_pwrite_slow(page, offset, len, user_data, 1418 page_do_bit17_swizzling, 1419 needs_clflush_before, 1420 needs_clflush_after); 1421 } 1422 1423 static int 1424 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj, 1425 const struct drm_i915_gem_pwrite *args) 1426 { 1427 struct drm_i915_private *i915 = to_i915(obj->base.dev); 1428 void __user *user_data; 1429 u64 remain; 1430 unsigned int obj_do_bit17_swizzling; 1431 unsigned int partial_cacheline_write; 1432 unsigned int needs_clflush; 1433 unsigned int offset, idx; 1434 int ret; 1435 #ifdef __DragonFly__ 1436 vm_object_t vm_obj; 1437 #endif 1438 1439 ret = mutex_lock_interruptible(&i915->drm.struct_mutex); 1440 if (ret) 1441 return ret; 1442 1443 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush); 1444 mutex_unlock(&i915->drm.struct_mutex); 1445 if (ret) 1446 return ret; 1447 1448 obj_do_bit17_swizzling = 0; 1449 if (i915_gem_object_needs_bit17_swizzle(obj)) 1450 obj_do_bit17_swizzling = BIT(17); 1451 1452 /* If we don't overwrite a cacheline completely we need to be 1453 * careful to have up-to-date data by first clflushing. Don't 1454 * overcomplicate things and flush the entire patch. 1455 */ 1456 partial_cacheline_write = 0; 1457 if (needs_clflush & CLFLUSH_BEFORE) 1458 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1; 1459 1460 user_data = u64_to_user_ptr(args->data_ptr); 1461 remain = args->size; 1462 offset = offset_in_page(args->offset); 1463 #ifdef __DragonFly__ 1464 vm_obj = obj->base.filp; 1465 VM_OBJECT_LOCK(vm_obj); 1466 vm_object_pip_add(vm_obj, 1); 1467 #endif 1468 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) { 1469 struct page *page = i915_gem_object_get_page(obj, idx); 1470 int length; 1471 1472 length = remain; 1473 if (offset + length > PAGE_SIZE) 1474 length = PAGE_SIZE - offset; 1475 1476 ret = shmem_pwrite(page, offset, length, user_data, 1477 page_to_phys(page) & obj_do_bit17_swizzling, 1478 (offset | length) & partial_cacheline_write, 1479 needs_clflush & CLFLUSH_AFTER); 1480 if (ret) 1481 break; 1482 1483 remain -= length; 1484 user_data += length; 1485 offset = 0; 1486 } 1487 #ifdef __DragonFly__ 1488 if (vm_obj != obj->base.filp) { 1489 kprintf("i915_gem_shmem_pwrite: VM_OBJECT CHANGED! %p %p\n", 1490 vm_obj, obj->base.filp); 1491 } 1492 vm_object_pip_wakeup(vm_obj); 1493 VM_OBJECT_UNLOCK(vm_obj); 1494 #endif 1495 1496 intel_fb_obj_flush(obj, ORIGIN_CPU); 1497 i915_gem_obj_finish_shmem_access(obj); 1498 return ret; 1499 } 1500 1501 /** 1502 * Writes data to the object referenced by handle. 
1503 * @dev: drm device 1504 * @data: ioctl data blob 1505 * @file: drm file 1506 * 1507 * On error, the contents of the buffer that were to be modified are undefined. 1508 */ 1509 int 1510 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1511 struct drm_file *file) 1512 { 1513 struct drm_i915_gem_pwrite *args = data; 1514 struct drm_i915_gem_object *obj; 1515 int ret; 1516 1517 if (args->size == 0) 1518 return 0; 1519 1520 #if 0 1521 if (!access_ok(VERIFY_READ, 1522 u64_to_user_ptr(args->data_ptr), 1523 args->size)) 1524 return -EFAULT; 1525 #endif 1526 1527 obj = i915_gem_object_lookup(file, args->handle); 1528 if (!obj) 1529 return -ENOENT; 1530 1531 /* Bounds check destination. */ 1532 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) { 1533 ret = -EINVAL; 1534 goto err; 1535 } 1536 1537 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1538 1539 ret = -ENODEV; 1540 if (obj->ops->pwrite) 1541 ret = obj->ops->pwrite(obj, args); 1542 if (ret != -ENODEV) 1543 goto err; 1544 1545 ret = i915_gem_object_wait(obj, 1546 I915_WAIT_INTERRUPTIBLE | 1547 I915_WAIT_ALL, 1548 MAX_SCHEDULE_TIMEOUT, 1549 to_rps_client(file)); 1550 if (ret) 1551 goto err; 1552 1553 ret = i915_gem_object_pin_pages(obj); 1554 if (ret) 1555 goto err; 1556 1557 ret = -EFAULT; 1558 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1559 * it would end up going through the fenced access, and we'll get 1560 * different detiling behavior between reading and writing. 1561 * pread/pwrite currently are reading and writing from the CPU 1562 * perspective, requiring manual detiling by the client. 1563 */ 1564 if (!i915_gem_object_has_struct_page(obj) || 1565 cpu_write_needs_clflush(obj)) 1566 /* Note that the gtt paths might fail with non-page-backed user 1567 * pointers (e.g. gtt mappings when moving data between 1568 * textures). Fallback to the shmem path in that case. 1569 */ 1570 ret = i915_gem_gtt_pwrite_fast(obj, args); 1571 1572 if (ret == -EFAULT || ret == -ENOSPC) { 1573 if (obj->phys_handle) 1574 ret = i915_gem_phys_pwrite(obj, args, file); 1575 else 1576 ret = i915_gem_shmem_pwrite(obj, args); 1577 } 1578 1579 i915_gem_object_unpin_pages(obj); 1580 err: 1581 i915_gem_object_put(obj); 1582 return ret; 1583 } 1584 1585 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) 1586 { 1587 struct drm_i915_private *i915; 1588 struct list_head *list; 1589 struct i915_vma *vma; 1590 1591 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 1592 1593 list_for_each_entry(vma, &obj->vma_list, obj_link) { 1594 if (!i915_vma_is_ggtt(vma)) 1595 break; 1596 1597 if (i915_vma_is_active(vma)) 1598 continue; 1599 1600 if (!drm_mm_node_allocated(&vma->node)) 1601 continue; 1602 1603 list_move_tail(&vma->vm_link, &vma->vm->inactive_list); 1604 } 1605 1606 i915 = to_i915(obj->base.dev); 1607 lockmgr(&i915->mm.obj_lock, LK_EXCLUSIVE); 1608 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list; 1609 list_move_tail(&obj->mm.link, list); 1610 lockmgr(&i915->mm.obj_lock, LK_RELEASE); 1611 } 1612 1613 /** 1614 * Called when user space prepares to use an object with the CPU, either 1615 * through the mmap ioctl's mapping or a GTT mapping. 
1616 * @dev: drm device 1617 * @data: ioctl data blob 1618 * @file: drm file 1619 */ 1620 int 1621 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1622 struct drm_file *file) 1623 { 1624 struct drm_i915_gem_set_domain *args = data; 1625 struct drm_i915_gem_object *obj; 1626 uint32_t read_domains = args->read_domains; 1627 uint32_t write_domain = args->write_domain; 1628 int err; 1629 1630 /* Only handle setting domains to types used by the CPU. */ 1631 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) 1632 return -EINVAL; 1633 1634 /* Having something in the write domain implies it's in the read 1635 * domain, and only that read domain. Enforce that in the request. 1636 */ 1637 if (write_domain != 0 && read_domains != write_domain) 1638 return -EINVAL; 1639 1640 obj = i915_gem_object_lookup(file, args->handle); 1641 if (!obj) 1642 return -ENOENT; 1643 1644 /* Try to flush the object off the GPU without holding the lock. 1645 * We will repeat the flush holding the lock in the normal manner 1646 * to catch cases where we are gazumped. 1647 */ 1648 err = i915_gem_object_wait(obj, 1649 I915_WAIT_INTERRUPTIBLE | 1650 (write_domain ? I915_WAIT_ALL : 0), 1651 MAX_SCHEDULE_TIMEOUT, 1652 to_rps_client(file)); 1653 if (err) 1654 goto out; 1655 1656 /* Flush and acquire obj->pages so that we are coherent through 1657 * direct access in memory with previous cached writes through 1658 * shmemfs and that our cache domain tracking remains valid. 1659 * For example, if the obj->filp was moved to swap without us 1660 * being notified and releasing the pages, we would mistakenly 1661 * continue to assume that the obj remained out of the CPU cached 1662 * domain. 1663 */ 1664 err = i915_gem_object_pin_pages(obj); 1665 if (err) 1666 goto out; 1667 1668 err = i915_mutex_lock_interruptible(dev); 1669 if (err) 1670 goto out_unpin; 1671 1672 if (read_domains & I915_GEM_DOMAIN_WC) 1673 err = i915_gem_object_set_to_wc_domain(obj, write_domain); 1674 else if (read_domains & I915_GEM_DOMAIN_GTT) 1675 err = i915_gem_object_set_to_gtt_domain(obj, write_domain); 1676 else 1677 err = i915_gem_object_set_to_cpu_domain(obj, write_domain); 1678 1679 /* And bump the LRU for this access */ 1680 i915_gem_object_bump_inactive_ggtt(obj); 1681 1682 mutex_unlock(&dev->struct_mutex); 1683 1684 if (write_domain != 0) 1685 intel_fb_obj_invalidate(obj, 1686 fb_write_origin(obj, write_domain)); 1687 1688 out_unpin: 1689 i915_gem_object_unpin_pages(obj); 1690 out: 1691 i915_gem_object_put(obj); 1692 return err; 1693 } 1694 1695 /** 1696 * Called when user space has done writes to this buffer 1697 * @dev: drm device 1698 * @data: ioctl data blob 1699 * @file: drm file 1700 */ 1701 int 1702 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1703 struct drm_file *file) 1704 { 1705 struct drm_i915_gem_sw_finish *args = data; 1706 struct drm_i915_gem_object *obj; 1707 1708 obj = i915_gem_object_lookup(file, args->handle); 1709 if (!obj) 1710 return -ENOENT; 1711 1712 /* Pinned buffers may be scanout, so flush the cache */ 1713 i915_gem_object_flush_if_display(obj); 1714 i915_gem_object_put(obj); 1715 1716 return 0; 1717 } 1718 1719 /** 1720 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address 1721 * it is mapped to. 1722 * @dev: drm device 1723 * @data: ioctl data blob 1724 * @file: drm file 1725 * 1726 * While the mapping holds a reference on the contents of the object, it doesn't 1727 * imply a ref on the object itself. 
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;
#ifdef __DragonFly__
	struct proc *p = curproc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_size_t size;
	int error = 0, rv;
#endif

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -EINVAL;
	}

	if (args->size == 0)
		goto out;

	size = round_page(args->size);
	if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Call hint to ensure that NULL is not returned as a valid address
	 * and to reduce vm_map traversals. XXX causes instability, use a
	 * fixed low address as the start point instead to avoid the NULL
	 * return issue.
	 */
	addr = PAGE_SIZE;

	/*
	 * Use 256KB alignment. It is unclear why this matters for a
	 * virtual address, but it appears to fix a number of application/X
	 * crashes and makes kms console switching much faster.
	 */
	vm_object_hold(obj->base.filp);
	vm_object_reference_locked(obj->base.filp);
	vm_object_drop(obj->base.filp);

	/* Something goes wrong here: mmap of size 4096 fails */
	rv = vm_map_find(map, obj->base.filp, NULL,
			 args->offset, &addr, args->size,
			 256 * 1024, /* align */
			 TRUE, /* fitit */
			 VM_MAPTYPE_NORMAL, VM_SUBSYS_DRM_GEM,
			 VM_PROT_READ | VM_PROT_WRITE, /* prot */
			 VM_PROT_READ | VM_PROT_WRITE, /* max */
			 MAP_SHARED /* cow */);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(obj->base.filp);
		error = -vm_mmap_to_errno(rv);
	} else {
		args->addr_ptr = (uint64_t)addr;
	}

	if (args->flags & I915_MMAP_WC) {	/* I915_PARAM_MMAP_VERSION */
#if 0
		addr = vm_mmap(obj->base.filp, 0, args->size,
			       PROT_READ | PROT_WRITE, MAP_SHARED,
			       args->offset);
		if (args->flags & I915_MMAP_WC) {
			struct mm_struct *mm = current->mm;
			struct vm_area_struct *vma;

			if (down_write_killable(&mm->mmap_sem)) {
				i915_gem_object_put(obj);
				return -EINTR;
			}
			vma = find_vma(mm, addr);
			if (vma)
				vma->vm_page_prot =
					pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
			else
				addr = -ENOMEM;
			up_write(&mm->mmap_sem);
#endif

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}

out:
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 2;
}

static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

#ifdef __DragonFly__
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
				      struct address_space *file_mapping)
{
	struct drm_i915_gem_object *obj = container_of(
		node, struct drm_i915_gem_object, base.vma_node);
	vm_object_t devobj;

	devobj = cdev_pager_lookup(obj);
	if (devobj != NULL) {
		VM_OBJECT_LOCK(devobj);
		vm_object_page_remove(devobj, 0, 0, false);
		VM_OBJECT_UNLOCK(devobj);
		vm_object_deallocate(devobj);
	}
}
#endif

static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
#ifdef __DragonFly__
	drm_vma_node_unmap(&obj->base.vma_node, NULL);
#else
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
#endif

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			break;

		i915_vma_unset_userfault(vma);
	}
}

/**
 * i915_gem_fault - fault a page into the GTT
 *
 * vm_obj is locked on entry and expected to be locked on return.
 *
 * This is an OBJT_MGTDEVICE object, *mres will be NULL and should be set
 * to the desired vm_page. The page is not indexed into the vm_obj.
 *
 * XXX Most GEM calls appear to be interruptible, but we can't hard loop
 * in that case. Release all resources and wait 1 tick before retrying.
 * This is a huge problem which needs to be fixed by getting rid of most
 * of the interruptibility. The Linux code does not retry but does appear
 * to have some sort of mechanism (VM_FAULT_NOPAGE ?) for the higher level
 * to be able to retry.
 *
 * --
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.
The fault handler takes care of binding the object to 1992 * the GTT (if needed), allocating and programming a fence register (again, 1993 * only if needed based on whether the old reg is still valid or the object 1994 * is tiled) and inserting a new PTE into the faulting process. 1995 * 1996 * Note that the faulting process may involve evicting existing objects 1997 * from the GTT and/or fence registers to make room. So performance may 1998 * suffer if the GTT working set is large or there are few fence registers 1999 * left. 2000 * 2001 * The current feature set supported by i915_gem_fault() and thus GTT mmaps 2002 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version). 2003 * vm_obj is locked on entry and expected to be locked on return. The VM 2004 * pager has placed an anonymous memory page at (obj,offset) which we have 2005 * to replace. 2006 */ 2007 // int i915_gem_fault(struct vm_fault *vmf) 2008 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) 2009 { 2010 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */ 2011 struct vm_area_struct *area; 2012 struct drm_i915_gem_object *obj = to_intel_bo(vm_obj->handle); 2013 struct drm_device *dev = obj->base.dev; 2014 struct drm_i915_private *dev_priv = to_i915(dev); 2015 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2016 bool write = !!(prot & VM_PROT_WRITE); 2017 struct i915_vma *vma; 2018 pgoff_t page_offset; 2019 vm_page_t m; 2020 unsigned int flags; 2021 int ret; 2022 #ifdef __DragonFly__ 2023 int didref = 0; 2024 struct vm_area_struct vmas; 2025 2026 /* Fill-in vm_area_struct */ 2027 area = &vmas; 2028 area->vm_private_data = vm_obj->handle; 2029 area->vm_start = 0; 2030 area->vm_end = obj->base.size; 2031 area->vm_mm = current->mm; 2032 // XXX: in Linux, mmap_sem is held on entry of this function 2033 // XXX: should that be an exclusive lock ? 2034 down_read(&area->vm_mm->mmap_sem); 2035 #endif 2036 2037 /* We don't use vmf->pgoff since that has the fake offset */ 2038 page_offset = (unsigned long)offset >> PAGE_SHIFT; 2039 2040 /* 2041 * vm_fault() has supplied us with a busied page placeholding 2042 * the operation. This presents a lock order reversal issue 2043 * again i915_gem_release_mmap() for our device mutex. 2044 * 2045 * Deal with the problem by getting rid of the placeholder now, 2046 * and then dealing with the potential for a new placeholder when 2047 * we try to insert later. 2048 */ 2049 KKASSERT(*mres == NULL); 2050 m = NULL; 2051 2052 retry: 2053 trace_i915_gem_object_fault(obj, page_offset, true, write); 2054 2055 /* Try to flush the object off the GPU first without holding the lock. 2056 * Upon acquiring the lock, we will perform our sanity checks and then 2057 * repeat the flush holding the lock in the normal manner to catch cases 2058 * where we are gazumped. 2059 */ 2060 ret = i915_gem_object_wait(obj, 2061 I915_WAIT_INTERRUPTIBLE, 2062 MAX_SCHEDULE_TIMEOUT, 2063 NULL); 2064 if (ret) 2065 goto err; 2066 2067 ret = i915_gem_object_pin_pages(obj); 2068 if (ret) 2069 goto err; 2070 2071 intel_runtime_pm_get(dev_priv); 2072 2073 ret = i915_mutex_lock_interruptible(dev); 2074 if (ret) { 2075 if (ret != -EINTR) 2076 kprintf("i915: caught bug(%d) (mutex_lock_inter)\n", ret); 2077 goto err_rpm; 2078 } 2079 2080 /* Access to snoopable pages through the GTT is incoherent. 
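 * Reads and writes through the aperture bypass the CPU cache, so a
 * snooped (cacheable) object seen via the GTT can return stale data and,
 * as the restrictions above note, may even hang some machines. Rather
 * than service such a fault we fail it with -EFAULT below, which the
 * error path at the end of this function turns into VM_PAGER_ERROR.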
*/ 2081 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { 2082 kprintf("i915: caught bug() (cache_level %d %d)\n", 2083 (obj->cache_level), !HAS_LLC(dev_priv)); 2084 ret = -EFAULT; 2085 goto err_unlock; 2086 } 2087 2088 /* If the object is smaller than a couple of partial vma, it is 2089 * not worth only creating a single partial vma - we may as well 2090 * clear enough space for the full object. 2091 */ 2092 flags = PIN_MAPPABLE; 2093 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT) 2094 flags |= PIN_NONBLOCK | PIN_NONFAULT; 2095 2096 /* Now pin it into the GTT as needed */ 2097 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags); 2098 if (IS_ERR(vma)) { 2099 /* Use a partial view if it is bigger than available space */ 2100 struct i915_ggtt_view view = 2101 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES); 2102 2103 kprintf("i915_gem_fault: CHUNKING PASS\n"); 2104 2105 /* Userspace is now writing through an untracked VMA, abandon 2106 * all hope that the hardware is able to track future writes. 2107 */ 2108 obj->frontbuffer_ggtt_origin = ORIGIN_CPU; 2109 2110 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 2111 } 2112 if (IS_ERR(vma)) { 2113 kprintf("i915: caught bug() (VMA error %ld objsize %ld)\n", 2114 PTR_ERR(vma), obj->base.size); 2115 ret = PTR_ERR(vma); 2116 goto err_unlock; 2117 } 2118 2119 ret = i915_gem_object_set_to_gtt_domain(obj, write); 2120 if (ret) { 2121 kprintf("i915: caught bug(%d) (set_to_gtt_dom)\n", ret); 2122 goto err_unpin; 2123 } 2124 2125 ret = i915_vma_pin_fence(vma); 2126 if (ret) { 2127 kprintf("i915: caught bug(%d) (vma_pin_fence)\n", ret); 2128 goto err_unpin; 2129 } 2130 2131 #ifdef __DragonFly__ 2132 /* 2133 * Add a pip count to avoid destruction and certain other 2134 * complex operations (such as collapses?) while unlocked. 2135 */ 2136 vm_object_pip_add(vm_obj, 1); 2137 didref = 1; 2138 2139 ret = 0; 2140 2141 #if 0 2142 /* NO LONGER USED */ 2143 { 2144 vm_page_t m; 2145 2146 m = vm_page_lookup(vm_obj, OFF_TO_IDX(offset)); 2147 if (m) { 2148 if (vm_page_busy_try(m, false)) { 2149 kprintf("i915_gem_fault: BUSY\n"); 2150 ret = -EINTR; 2151 goto err_unpin; 2152 } 2153 } 2154 goto have_page; 2155 } 2156 #endif 2157 2158 /* Finally, remap it using the new GTT offset */ 2159 m = vm_phys_fictitious_to_vm_page(ggtt->mappable_base + 2160 vma->node.start + offset); 2161 2162 if (m == NULL) { 2163 kprintf("i915: caught bug() (phys_fict_to_vm)\n"); 2164 ret = -EFAULT; 2165 goto err_fence; 2166 } 2167 KASSERT((m->flags & PG_FICTITIOUS) != 0, ("not fictitious %p", m)); 2168 KASSERT(m->wire_count == 1, ("wire_count not 1 %p", m)); 2169 2170 /* 2171 * Try to busy the page. Fails on non-zero return. 2172 */ 2173 if (vm_page_busy_try(m, false)) { 2174 kprintf("i915_gem_fault: BUSY(2)\n"); 2175 ret = -EINTR; 2176 goto err_fence; 2177 } 2178 m->valid = VM_PAGE_BITS_ALL; 2179 2180 #ifdef USE_INSERT 2181 /* NO LONGER USED */ 2182 /* 2183 * This should always work since we already checked via a lookup 2184 * above. 2185 */ 2186 if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset)) == FALSE) { 2187 kprintf("i915:gem_fault: page %p,%jd already in object\n", 2188 vm_obj, 2189 OFF_TO_IDX(offset)); 2190 vm_page_wakeup(m); 2191 ret = -EINTR; 2192 goto err_unpin; 2193 } 2194 have_page: 2195 #endif 2196 *mres = m; 2197 ret = VM_PAGER_OK; 2198 2199 /* 2200 * ALTERNATIVE ERROR RETURN. 2201 * 2202 * OBJECT EXPECTED TO BE LOCKED. 
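 * Note that the success path does not jump over the labels below: with
 * ret set to VM_PAGER_OK we deliberately fall through err_fence,
 * err_unpin, err_unlock, err_rpm and err so that the temporary fence
 * pin, vma pin, struct_mutex, runtime-pm wakeref and page pin are all
 * released, and the switch at err: passes VM_PAGER_OK through unchanged.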
2203 */ 2204 #endif 2205 2206 /* Mark as being mmapped into userspace for later revocation */ 2207 assert_rpm_wakelock_held(dev_priv); 2208 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) 2209 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list); 2210 GEM_BUG_ON(!obj->userfault_count); 2211 2212 err_fence: 2213 i915_vma_unpin_fence(vma); 2214 err_unpin: 2215 __i915_vma_unpin(vma); 2216 err_unlock: 2217 mutex_unlock(&dev->struct_mutex); 2218 err_rpm: 2219 intel_runtime_pm_put(dev_priv); 2220 i915_gem_object_unpin_pages(obj); 2221 err: 2222 switch (ret) { 2223 case -EIO: 2224 /* 2225 * We eat errors when the gpu is terminally wedged to avoid 2226 * userspace unduly crashing (gl has no provisions for mmaps to 2227 * fail). But any other -EIO isn't ours (e.g. swap in failure) 2228 * and so needs to be reported. 2229 */ 2230 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 2231 // ret = VM_FAULT_SIGBUS; 2232 break; 2233 } 2234 case -EAGAIN: 2235 /* 2236 * EAGAIN means the gpu is hung and we'll wait for the error 2237 * handler to reset everything when re-faulting in 2238 * i915_mutex_lock_interruptible. 2239 */ 2240 case -ERESTARTSYS: 2241 case -EINTR: 2242 #ifdef __DragonFly__ 2243 if (didref) { 2244 kprintf("i915: caught bug(%d) (retry)\n", ret); 2245 vm_object_pip_wakeup(vm_obj); 2246 didref = 0; 2247 } 2248 VM_OBJECT_UNLOCK(vm_obj); 2249 int dummy; 2250 tsleep(&dummy, 0, "delay", 1); /* XXX */ 2251 VM_OBJECT_LOCK(vm_obj); 2252 goto retry; 2253 case VM_PAGER_OK: 2254 break; 2255 #endif 2256 default: 2257 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret); 2258 ret = VM_PAGER_ERROR; 2259 break; 2260 } 2261 2262 #ifdef __DragonFly__ 2263 if (didref) 2264 vm_object_pip_wakeup(vm_obj); 2265 else 2266 kprintf("i915: caught bug(%d)\n", ret); 2267 2268 up_read(&area->vm_mm->mmap_sem); 2269 #endif 2270 2271 return ret; 2272 } 2273 2274 /** 2275 * i915_gem_release_mmap - remove physical page mappings 2276 * @obj: obj in question 2277 * 2278 * Preserve the reservation of the mmapping with the DRM core code, but 2279 * relinquish ownership of the pages back to the system. 2280 * 2281 * It is vital that we remove the page mapping if we have mapped a tiled 2282 * object through the GTT and then lose the fence register due to 2283 * resource pressure. Similarly if the object has been moved out of the 2284 * aperture, than pages mapped into userspace must be revoked. Removing the 2285 * mapping will then trigger a page fault on the next user access, allowing 2286 * fixup by i915_gem_fault(). 2287 */ 2288 void 2289 i915_gem_release_mmap(struct drm_i915_gem_object *obj) 2290 { 2291 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2292 2293 /* Serialisation between user GTT access and our code depends upon 2294 * revoking the CPU's PTE whilst the mutex is held. The next user 2295 * pagefault then has to wait until we release the mutex. 2296 * 2297 * Note that RPM complicates somewhat by adding an additional 2298 * requirement that operations to the GGTT be made holding the RPM 2299 * wakeref. 2300 */ 2301 lockdep_assert_held(&i915->drm.struct_mutex); 2302 intel_runtime_pm_get(i915); 2303 2304 if (!obj->userfault_count) 2305 goto out; 2306 2307 __i915_gem_object_release_mmap(obj); 2308 2309 /* Ensure that the CPU's PTE are revoked and there are not outstanding 2310 * memory transactions from userspace before we return. 
The TLB 2311 * flushing implied above by changing the PTE above *should* be 2312 * sufficient, an extra barrier here just provides us with a bit 2313 * of paranoid documentation about our requirement to serialise 2314 * memory writes before touching registers / GSM. 2315 */ 2316 wmb(); 2317 2318 out: 2319 intel_runtime_pm_put(i915); 2320 } 2321 2322 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) 2323 { 2324 struct drm_i915_gem_object *obj, *on; 2325 int i; 2326 2327 /* 2328 * Only called during RPM suspend. All users of the userfault_list 2329 * must be holding an RPM wakeref to ensure that this can not 2330 * run concurrently with themselves (and use the struct_mutex for 2331 * protection between themselves). 2332 */ 2333 2334 list_for_each_entry_safe(obj, on, 2335 &dev_priv->mm.userfault_list, userfault_link) 2336 __i915_gem_object_release_mmap(obj); 2337 2338 /* The fence will be lost when the device powers down. If any were 2339 * in use by hardware (i.e. they are pinned), we should not be powering 2340 * down! All other fences will be reacquired by the user upon waking. 2341 */ 2342 for (i = 0; i < dev_priv->num_fence_regs; i++) { 2343 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 2344 2345 /* Ideally we want to assert that the fence register is not 2346 * live at this point (i.e. that no piece of code will be 2347 * trying to write through fence + GTT, as that both violates 2348 * our tracking of activity and associated locking/barriers, 2349 * but also is illegal given that the hw is powered down). 2350 * 2351 * Previously we used reg->pin_count as a "liveness" indicator. 2352 * That is not sufficient, and we need a more fine-grained 2353 * tool if we want to have a sanity check here. 2354 */ 2355 2356 if (!reg->vma) 2357 continue; 2358 2359 GEM_BUG_ON(i915_vma_has_userfault(reg->vma)); 2360 reg->dirty = true; 2361 } 2362 } 2363 2364 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2365 { 2366 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2367 int err; 2368 2369 err = drm_gem_create_mmap_offset(&obj->base); 2370 if (likely(!err)) 2371 return 0; 2372 2373 /* Attempt to reap some mmap space from dead objects */ 2374 do { 2375 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE); 2376 if (err) 2377 break; 2378 2379 i915_gem_drain_freed_objects(dev_priv); 2380 err = drm_gem_create_mmap_offset(&obj->base); 2381 if (!err) 2382 break; 2383 2384 } while (flush_delayed_work(&dev_priv->gt.retire_work)); 2385 2386 return err; 2387 } 2388 2389 #if 0 2390 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 2391 { 2392 drm_gem_free_mmap_offset(&obj->base); 2393 } 2394 #endif 2395 2396 int 2397 i915_gem_mmap_gtt(struct drm_file *file, 2398 struct drm_device *dev, 2399 uint32_t handle, 2400 uint64_t *offset) 2401 { 2402 struct drm_i915_gem_object *obj; 2403 int ret; 2404 2405 obj = i915_gem_object_lookup(file, handle); 2406 if (!obj) 2407 return -ENOENT; 2408 2409 ret = i915_gem_object_create_mmap_offset(obj); 2410 if (ret == 0) 2411 *offset = DRM_GEM_MAPPING_OFF(obj->base.map_list.key) | 2412 DRM_GEM_MAPPING_KEY; 2413 2414 i915_gem_object_put(obj); 2415 return ret; 2416 } 2417 2418 /** 2419 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 2420 * @dev: DRM device 2421 * @data: GTT mapping ioctl data 2422 * @file: GEM object info 2423 * 2424 * Simply returns the fake offset to userspace so it can mmap it. 
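 * A rough sketch of the expected userspace usage (illustrative only, the
 * variable names are not taken from any real client and error handling
 * is omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *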
2425 * The mmap call will end up in drm_gem_mmap(), which will set things 2426 * up so we can get faults in the handler above. 2427 * 2428 * The fault handler will take care of binding the object into the GTT 2429 * (since it may have been evicted to make room for something), allocating 2430 * a fence register, and mapping the appropriate aperture address into 2431 * userspace. 2432 */ 2433 int 2434 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 2435 struct drm_file *file) 2436 { 2437 struct drm_i915_gem_mmap_gtt *args = data; 2438 2439 return i915_gem_mmap_gtt(file, dev, args->handle, (uint64_t *)&args->offset); 2440 } 2441 2442 /* Immediately discard the backing storage */ 2443 static void 2444 i915_gem_object_truncate(struct drm_i915_gem_object *obj) 2445 { 2446 vm_object_t vm_obj = obj->base.filp; 2447 2448 if (obj->base.filp == NULL) 2449 return; 2450 2451 VM_OBJECT_LOCK(vm_obj); 2452 vm_object_page_remove(vm_obj, 0, 0, false); 2453 VM_OBJECT_UNLOCK(vm_obj); 2454 2455 /* Our goal here is to return as much of the memory as 2456 * is possible back to the system as we are called from OOM. 2457 * To do this we must instruct the shmfs to drop all of its 2458 * backing pages, *now*. 2459 */ 2460 #if 0 2461 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); 2462 #endif 2463 obj->mm.madv = __I915_MADV_PURGED; 2464 obj->mm.pages = ERR_PTR(-EFAULT); 2465 } 2466 2467 /* Try to discard unwanted pages */ 2468 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) 2469 { 2470 #if 0 2471 struct address_space *mapping; 2472 #endif 2473 2474 lockdep_assert_held(&obj->mm.lock); 2475 GEM_BUG_ON(i915_gem_object_has_pages(obj)); 2476 2477 switch (obj->mm.madv) { 2478 case I915_MADV_DONTNEED: 2479 i915_gem_object_truncate(obj); 2480 case __I915_MADV_PURGED: 2481 return; 2482 } 2483 2484 if (obj->base.filp == NULL) 2485 return; 2486 2487 #if 0 2488 mapping = obj->base.filp->f_mapping, 2489 invalidate_mapping_pages(mapping, 0, (loff_t)-1); 2490 #endif 2491 invalidate_mapping_pages(obj->base.filp, 0, (loff_t)-1); 2492 } 2493 2494 static void 2495 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, 2496 struct sg_table *pages) 2497 { 2498 struct sgt_iter sgt_iter; 2499 struct page *page; 2500 2501 __i915_gem_object_release_shmem(obj, pages, true); 2502 2503 i915_gem_gtt_finish_pages(obj, pages); 2504 2505 if (i915_gem_object_needs_bit17_swizzle(obj)) 2506 i915_gem_object_save_bit_17_swizzle(obj, pages); 2507 2508 for_each_sgt_page(page, sgt_iter, pages) { 2509 if (obj->mm.dirty) 2510 set_page_dirty(page); 2511 2512 if (obj->mm.madv == I915_MADV_WILLNEED) 2513 mark_page_accessed(page); 2514 2515 put_page(page); 2516 } 2517 obj->mm.dirty = false; 2518 2519 sg_free_table(pages); 2520 kfree(pages); 2521 } 2522 2523 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) 2524 { 2525 struct radix_tree_iter iter; 2526 void __rcu **slot; 2527 2528 rcu_read_lock(); 2529 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0) 2530 radix_tree_delete(&obj->mm.get_page.radix, iter.index); 2531 rcu_read_unlock(); 2532 } 2533 2534 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, 2535 enum i915_mm_subclass subclass) 2536 { 2537 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2538 struct sg_table *pages; 2539 2540 if (i915_gem_object_has_pinned_pages(obj)) 2541 return; 2542 2543 GEM_BUG_ON(obj->bind_count); 2544 if (!i915_gem_object_has_pages(obj)) 2545 return; 2546 2547 /* May be called by shrinker from within 
get_pages() (on another bo) */ 2548 mutex_lock_nested(&obj->mm.lock, subclass); 2549 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) 2550 goto unlock; 2551 2552 /* ->put_pages might need to allocate memory for the bit17 swizzle 2553 * array, hence protect them from being reaped by removing them from gtt 2554 * lists early. */ 2555 pages = fetch_and_zero(&obj->mm.pages); 2556 GEM_BUG_ON(!pages); 2557 2558 lockmgr(&i915->mm.obj_lock, LK_EXCLUSIVE); 2559 list_del(&obj->mm.link); 2560 lockmgr(&i915->mm.obj_lock, LK_RELEASE); 2561 2562 if (obj->mm.mapping) { 2563 void *ptr; 2564 2565 ptr = page_mask_bits(obj->mm.mapping); 2566 if (is_vmalloc_addr(ptr)) 2567 vunmap(ptr); 2568 else 2569 kunmap(kmap_to_page(ptr)); 2570 2571 obj->mm.mapping = NULL; 2572 } 2573 2574 __i915_gem_object_reset_page_iter(obj); 2575 2576 if (!IS_ERR(pages)) 2577 obj->ops->put_pages(obj, pages); 2578 2579 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0; 2580 2581 unlock: 2582 mutex_unlock(&obj->mm.lock); 2583 } 2584 2585 static bool i915_sg_trim(struct sg_table *orig_st) 2586 { 2587 struct sg_table new_st; 2588 struct scatterlist *sg, *new_sg; 2589 unsigned int i; 2590 2591 if (orig_st->nents == orig_st->orig_nents) 2592 return false; 2593 2594 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN)) 2595 return false; 2596 2597 new_sg = new_st.sgl; 2598 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) { 2599 sg_set_page(new_sg, sg_page(sg), sg->length, 0); 2600 /* called before being DMA mapped, no need to copy sg->dma_* */ 2601 new_sg = sg_next(new_sg); 2602 } 2603 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */ 2604 2605 sg_free_table(orig_st); 2606 2607 *orig_st = new_st; 2608 return true; 2609 } 2610 2611 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2612 { 2613 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 2614 const unsigned long page_count = obj->base.size / PAGE_SIZE; 2615 unsigned long i; 2616 struct vm_object *mapping; 2617 struct sg_table *st; 2618 struct scatterlist *sg; 2619 struct sgt_iter sgt_iter; 2620 struct page *page; 2621 unsigned long last_pfn = 0; /* suppress gcc warning */ 2622 unsigned int max_segment = i915_sg_segment_size(); 2623 unsigned int sg_page_sizes; 2624 gfp_t noreclaim; 2625 int ret; 2626 2627 /* Assert that the object is not currently in any GPU domain. As it 2628 * wasn't in the GTT, there shouldn't be any way it could have been in 2629 * a GPU cache 2630 */ 2631 GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2632 GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2633 2634 st = kmalloc(sizeof(*st), M_DRM, GFP_KERNEL); 2635 if (st == NULL) 2636 return -ENOMEM; 2637 2638 rebuild_st: 2639 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 2640 kfree(st); 2641 return -ENOMEM; 2642 } 2643 2644 /* Get the list of pages out of our struct file. They'll be pinned 2645 * at this point until we release them. 
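 * Physically contiguous pages are coalesced below into a single
 * scatterlist entry, capped at max_segment bytes, and the table is
 * trimmed afterwards by i915_sg_trim() so unused entries do not linger.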
2646 * 2647 * Fail silently without starting the shrinker 2648 */ 2649 #ifdef __DragonFly__ 2650 mapping = obj->base.filp; 2651 VM_OBJECT_LOCK(mapping); 2652 #endif 2653 noreclaim = mapping_gfp_constraint(mapping, 2654 ~(__GFP_IO | __GFP_RECLAIM)); 2655 noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2656 2657 sg = st->sgl; 2658 st->nents = 0; 2659 sg_page_sizes = 0; 2660 for (i = 0; i < page_count; i++) { 2661 const unsigned int shrink[] = { 2662 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2663 0, 2664 }, *s = shrink; 2665 gfp_t gfp = noreclaim; 2666 2667 do { 2668 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2669 if (likely(!IS_ERR(page))) 2670 break; 2671 2672 if (!*s) { 2673 ret = PTR_ERR(page); 2674 goto err_sg; 2675 } 2676 2677 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); 2678 cond_resched(); 2679 2680 /* We've tried hard to allocate the memory by reaping 2681 * our own buffer, now let the real VM do its job and 2682 * go down in flames if truly OOM. 2683 * 2684 * However, since graphics tend to be disposable, 2685 * defer the oom here by reporting the ENOMEM back 2686 * to userspace. 2687 */ 2688 if (!*s) { 2689 /* reclaim and warn, but no oom */ 2690 gfp = mapping_gfp_mask(mapping); 2691 2692 /* Our bo are always dirty and so we require 2693 * kswapd to reclaim our pages (direct reclaim 2694 * does not effectively begin pageout of our 2695 * buffers on its own). However, direct reclaim 2696 * only waits for kswapd when under allocation 2697 * congestion. So as a result __GFP_RECLAIM is 2698 * unreliable and fails to actually reclaim our 2699 * dirty pages -- unless you try over and over 2700 * again with !__GFP_NORETRY. However, we still 2701 * want to fail this allocation rather than 2702 * trigger the out-of-memory killer and for 2703 * this we want __GFP_RETRY_MAYFAIL. 2704 */ 2705 gfp |= __GFP_RETRY_MAYFAIL; 2706 } 2707 } while (1); 2708 2709 if (!i || 2710 sg->length >= max_segment || 2711 page_to_pfn(page) != last_pfn + 1) { 2712 if (i) { 2713 sg_page_sizes |= sg->length; 2714 sg = sg_next(sg); 2715 } 2716 st->nents++; 2717 sg_set_page(sg, page, PAGE_SIZE, 0); 2718 } else { 2719 sg->length += PAGE_SIZE; 2720 } 2721 last_pfn = page_to_pfn(page); 2722 2723 /* Check that the i965g/gm workaround works. */ 2724 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); 2725 } 2726 if (sg) { /* loop terminated early; short sg table */ 2727 sg_page_sizes |= sg->length; 2728 sg_mark_end(sg); 2729 } 2730 #ifdef __DragonFly__ 2731 VM_OBJECT_UNLOCK(mapping); 2732 #endif 2733 2734 /* Trim unused sg entries to avoid wasting memory. */ 2735 i915_sg_trim(st); 2736 2737 ret = i915_gem_gtt_prepare_pages(obj, st); 2738 if (ret) { 2739 /* DMA remapping failed? One possible cause is that 2740 * it could not reserve enough large entries, asking 2741 * for PAGE_SIZE chunks instead may be helpful. 
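 * That is what the retry below does: every page already obtained is
 * released, the table is rebuilt from scratch and max_segment is clamped
 * to PAGE_SIZE so each scatterlist entry covers a single page.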
2742 */ 2743 if (max_segment > PAGE_SIZE) { 2744 for_each_sgt_page(page, sgt_iter, st) 2745 put_page(page); 2746 sg_free_table(st); 2747 2748 max_segment = PAGE_SIZE; 2749 goto rebuild_st; 2750 } else { 2751 dev_warn(&dev_priv->drm.pdev->dev, 2752 "Failed to DMA remap %lu pages\n", 2753 page_count); 2754 goto err_pages; 2755 } 2756 } 2757 2758 if (i915_gem_object_needs_bit17_swizzle(obj)) 2759 i915_gem_object_do_bit_17_swizzle(obj, st); 2760 2761 __i915_gem_object_set_pages(obj, st, sg_page_sizes); 2762 2763 return 0; 2764 2765 err_sg: 2766 sg_mark_end(sg); 2767 err_pages: 2768 for_each_sgt_page(page, sgt_iter, st) 2769 put_page(page); 2770 #ifdef __DragonFly__ 2771 VM_OBJECT_UNLOCK(mapping); 2772 #endif 2773 sg_free_table(st); 2774 kfree(st); 2775 2776 /* shmemfs first checks if there is enough memory to allocate the page 2777 * and reports ENOSPC should there be insufficient, along with the usual 2778 * ENOMEM for a genuine allocation failure. 2779 * 2780 * We use ENOSPC in our driver to mean that we have run out of aperture 2781 * space and so want to translate the error from shmemfs back to our 2782 * usual understanding of ENOMEM. 2783 */ 2784 if (ret == -ENOSPC) 2785 ret = -ENOMEM; 2786 2787 return ret; 2788 } 2789 2790 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, 2791 struct sg_table *pages, 2792 unsigned int sg_page_sizes) 2793 { 2794 struct drm_i915_private *i915 = to_i915(obj->base.dev); 2795 unsigned long supported = INTEL_INFO(i915)->page_sizes; 2796 int i; 2797 2798 lockdep_assert_held(&obj->mm.lock); 2799 2800 obj->mm.get_page.sg_pos = pages->sgl; 2801 obj->mm.get_page.sg_idx = 0; 2802 2803 obj->mm.pages = pages; 2804 2805 if (i915_gem_object_is_tiled(obj) && 2806 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 2807 GEM_BUG_ON(obj->mm.quirked); 2808 __i915_gem_object_pin_pages(obj); 2809 obj->mm.quirked = true; 2810 } 2811 2812 GEM_BUG_ON(!sg_page_sizes); 2813 obj->mm.page_sizes.phys = sg_page_sizes; 2814 2815 /* 2816 * Calculate the supported page-sizes which fit into the given 2817 * sg_page_sizes. This will give us the page-sizes which we may be able 2818 * to use opportunistically when later inserting into the GTT. For 2819 * example if phys=2G, then in theory we should be able to use 1G, 2M, 2820 * 64K or 4K pages, although in practice this will depend on a number of 2821 * other factors. 2822 */ 2823 obj->mm.page_sizes.sg = 0; 2824 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { 2825 if (obj->mm.page_sizes.phys & ~0u << i) 2826 obj->mm.page_sizes.sg |= BIT(i); 2827 } 2828 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg)); 2829 2830 lockmgr(&i915->mm.obj_lock, LK_EXCLUSIVE); 2831 list_add(&obj->mm.link, &i915->mm.unbound_list); 2832 lockmgr(&i915->mm.obj_lock, LK_RELEASE); 2833 } 2834 2835 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2836 { 2837 int err; 2838 2839 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { 2840 DRM_DEBUG("Attempting to obtain a purgeable object\n"); 2841 return -EFAULT; 2842 } 2843 2844 err = obj->ops->get_pages(obj); 2845 GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages)); 2846 2847 return err; 2848 } 2849 2850 /* Ensure that the associated pages are gathered from the backing storage 2851 * and pinned into our object. 
i915_gem_object_pin_pages() may be called 2852 * multiple times before they are released by a single call to 2853 * i915_gem_object_unpin_pages() - once the pages are no longer referenced 2854 * either as a result of memory pressure (reaping pages under the shrinker) 2855 * or as the object is itself released. 2856 */ 2857 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2858 { 2859 int err; 2860 2861 err = mutex_lock_interruptible(&obj->mm.lock); 2862 if (err) 2863 return err; 2864 2865 if (unlikely(!i915_gem_object_has_pages(obj))) { 2866 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2867 2868 err = ____i915_gem_object_get_pages(obj); 2869 if (err) 2870 goto unlock; 2871 2872 smp_mb__before_atomic(); 2873 } 2874 atomic_inc(&obj->mm.pages_pin_count); 2875 2876 unlock: 2877 mutex_unlock(&obj->mm.lock); 2878 return err; 2879 } 2880 2881 /* The 'mapping' part of i915_gem_object_pin_map() below */ 2882 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, 2883 enum i915_map_type type) 2884 { 2885 unsigned long n_pages = obj->base.size >> PAGE_SHIFT; 2886 struct sg_table *sgt = obj->mm.pages; 2887 struct sgt_iter sgt_iter; 2888 struct page *page; 2889 struct page *stack_pages[32]; 2890 struct page **pages = stack_pages; 2891 unsigned long i = 0; 2892 pgprot_t pgprot; 2893 void *addr; 2894 2895 /* A single page can always be kmapped */ 2896 if (n_pages == 1 && type == I915_MAP_WB) 2897 return kmap(sg_page(sgt->sgl)); 2898 2899 if (n_pages > ARRAY_SIZE(stack_pages)) { 2900 /* Too big for stack -- allocate temporary array instead */ 2901 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); 2902 if (!pages) 2903 return NULL; 2904 } 2905 2906 for_each_sgt_page(page, sgt_iter, sgt) 2907 pages[i++] = page; 2908 2909 /* Check that we have the expected number of pages */ 2910 GEM_BUG_ON(i != n_pages); 2911 2912 switch (type) { 2913 default: 2914 MISSING_CASE(type); 2915 /* fallthrough to use PAGE_KERNEL anyway */ 2916 case I915_MAP_WB: 2917 pgprot = PAGE_KERNEL; 2918 break; 2919 case I915_MAP_WC: 2920 pgprot = pgprot_writecombine(PAGE_KERNEL_IO); 2921 break; 2922 } 2923 addr = vmap(pages, n_pages, 0, pgprot); 2924 2925 if (pages != stack_pages) 2926 kvfree(pages); 2927 2928 return addr; 2929 } 2930 2931 /* get, pin, and map the pages of the object into kernel space */ 2932 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, 2933 enum i915_map_type type) 2934 { 2935 enum i915_map_type has_type; 2936 bool pinned; 2937 void *ptr; 2938 int ret; 2939 2940 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj)); 2941 2942 ret = mutex_lock_interruptible(&obj->mm.lock); 2943 if (ret) 2944 return ERR_PTR(ret); 2945 2946 pinned = !(type & I915_MAP_OVERRIDE); 2947 type &= ~I915_MAP_OVERRIDE; 2948 2949 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { 2950 if (unlikely(!i915_gem_object_has_pages(obj))) { 2951 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); 2952 2953 ret = ____i915_gem_object_get_pages(obj); 2954 if (ret) 2955 goto err_unlock; 2956 2957 smp_mb__before_atomic(); 2958 } 2959 atomic_inc(&obj->mm.pages_pin_count); 2960 pinned = false; 2961 } 2962 GEM_BUG_ON(!i915_gem_object_has_pages(obj)); 2963 2964 ptr = page_unpack_bits(obj->mm.mapping, &has_type); 2965 if (ptr && has_type != type) { 2966 if (pinned) { 2967 ret = -EBUSY; 2968 goto err_unpin; 2969 } 2970 2971 if (is_vmalloc_addr(ptr)) 2972 vunmap(ptr); 2973 else 2974 kunmap(kmap_to_page(ptr)); 2975 2976 ptr = obj->mm.mapping = NULL; 2977 } 2978 2979 if (!ptr) { 2980 ptr = 
i915_gem_object_map(obj, type); 2981 if (!ptr) { 2982 ret = -ENOMEM; 2983 goto err_unpin; 2984 } 2985 2986 obj->mm.mapping = page_pack_bits(ptr, type); 2987 } 2988 2989 out_unlock: 2990 mutex_unlock(&obj->mm.lock); 2991 return ptr; 2992 2993 err_unpin: 2994 atomic_dec(&obj->mm.pages_pin_count); 2995 err_unlock: 2996 ptr = ERR_PTR(ret); 2997 goto out_unlock; 2998 } 2999 3000 static int 3001 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, 3002 const struct drm_i915_gem_pwrite *arg) 3003 { 3004 #ifndef __DragonFly__ 3005 struct address_space *mapping = obj->base.filp->f_mapping; 3006 #endif 3007 char __user *user_data = u64_to_user_ptr(arg->data_ptr); 3008 u64 remain, offset; 3009 unsigned int pg; 3010 3011 /* Before we instantiate/pin the backing store for our use, we 3012 * can prepopulate the shmemfs filp efficiently using a write into 3013 * the pagecache. We avoid the penalty of instantiating all the 3014 * pages, important if the user is just writing to a few and never 3015 * uses the object on the GPU, and using a direct write into shmemfs 3016 * allows it to avoid the cost of retrieving a page (either swapin 3017 * or clearing-before-use) before it is overwritten. 3018 */ 3019 if (i915_gem_object_has_pages(obj)) 3020 return -ENODEV; 3021 3022 if (obj->mm.madv != I915_MADV_WILLNEED) 3023 return -EFAULT; 3024 3025 /* Before the pages are instantiated the object is treated as being 3026 * in the CPU domain. The pages will be clflushed as required before 3027 * use, and we can freely write into the pages directly. If userspace 3028 * races pwrite with any other operation; corruption will ensue - 3029 * that is userspace's prerogative! 3030 */ 3031 3032 remain = arg->size; 3033 offset = arg->offset; 3034 pg = offset_in_page(offset); 3035 3036 do { 3037 unsigned int len, unwritten; 3038 struct page *page; 3039 void *data, *vaddr; 3040 int err; 3041 3042 len = PAGE_SIZE - pg; 3043 if (len > remain) 3044 len = remain; 3045 3046 #ifndef __DragonFly__ 3047 err = pagecache_write_begin(obj->base.filp, mapping, 3048 #else 3049 err = pagecache_write_begin(obj->base.filp, NULL, 3050 #endif 3051 offset, len, 0, 3052 &page, &data); 3053 if (err < 0) 3054 return err; 3055 3056 vaddr = kmap(page); 3057 unwritten = copy_from_user(vaddr + pg, user_data, len); 3058 kunmap(page); 3059 3060 #ifndef __DragonFly__ 3061 err = pagecache_write_end(obj->base.filp, mapping, 3062 #else 3063 err = pagecache_write_end(obj->base.filp, NULL, 3064 #endif 3065 offset, len, len - unwritten, 3066 page, data); 3067 if (err < 0) 3068 return err; 3069 3070 if (unwritten) 3071 return -EFAULT; 3072 3073 remain -= len; 3074 user_data += len; 3075 offset += len; 3076 pg = 0; 3077 } while (remain); 3078 3079 return 0; 3080 } 3081 3082 static bool ban_context(const struct i915_gem_context *ctx, 3083 unsigned int score) 3084 { 3085 return (i915_gem_context_is_bannable(ctx) && 3086 score >= CONTEXT_SCORE_BAN_THRESHOLD); 3087 } 3088 3089 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 3090 { 3091 unsigned int score; 3092 bool banned; 3093 3094 atomic_inc(&ctx->guilty_count); 3095 3096 score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); 3097 banned = ban_context(ctx, score); 3098 DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? 
%s\n", 3099 ctx->name, score, yesno(banned)); 3100 if (!banned) 3101 return; 3102 3103 i915_gem_context_set_banned(ctx); 3104 if (!IS_ERR_OR_NULL(ctx->file_priv)) { 3105 atomic_inc(&ctx->file_priv->context_bans); 3106 DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 3107 ctx->name, atomic_read(&ctx->file_priv->context_bans)); 3108 } 3109 } 3110 3111 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) 3112 { 3113 atomic_inc(&ctx->active_count); 3114 } 3115 3116 struct drm_i915_gem_request * 3117 i915_gem_find_active_request(struct intel_engine_cs *engine) 3118 { 3119 struct drm_i915_gem_request *request, *active = NULL; 3120 unsigned long flags; 3121 3122 /* We are called by the error capture and reset at a random 3123 * point in time. In particular, note that neither is crucially 3124 * ordered with an interrupt. After a hang, the GPU is dead and we 3125 * assume that no more writes can happen (we waited long enough for 3126 * all writes that were in transaction to be flushed) - adding an 3127 * extra delay for a recent interrupt is pointless. Hence, we do 3128 * not need an engine->irq_seqno_barrier() before the seqno reads. 3129 */ 3130 spin_lock_irqsave(&engine->timeline->lock, flags); 3131 list_for_each_entry(request, &engine->timeline->requests, link) { 3132 if (__i915_gem_request_completed(request, 3133 request->global_seqno)) 3134 continue; 3135 3136 GEM_BUG_ON(request->engine != engine); 3137 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 3138 &request->fence.flags)); 3139 3140 active = request; 3141 break; 3142 } 3143 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3144 3145 return active; 3146 } 3147 3148 static bool engine_stalled(struct intel_engine_cs *engine) 3149 { 3150 if (!engine->hangcheck.stalled) 3151 return false; 3152 3153 /* Check for possible seqno movement after hang declaration */ 3154 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) { 3155 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name); 3156 return false; 3157 } 3158 3159 return true; 3160 } 3161 3162 /* 3163 * Ensure irq handler finishes, and not run again. 3164 * Also return the active request so that we only search for it once. 3165 */ 3166 struct drm_i915_gem_request * 3167 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) 3168 { 3169 struct drm_i915_gem_request *request = NULL; 3170 3171 /* 3172 * During the reset sequence, we must prevent the engine from 3173 * entering RC6. As the context state is undefined until we restart 3174 * the engine, if it does enter RC6 during the reset, the state 3175 * written to the powercontext is undefined and so we may lose 3176 * GPU state upon resume, i.e. fail to restart after a reset. 3177 */ 3178 intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); 3179 3180 /* 3181 * Prevent the signaler thread from updating the request 3182 * state (by calling dma_fence_signal) as we are processing 3183 * the reset. The write from the GPU of the seqno is 3184 * asynchronous and the signaler thread may see a different 3185 * value to us and declare the request complete, even though 3186 * the reset routine have picked that request as the active 3187 * (incomplete) request. This conflict is not handled 3188 * gracefully! 3189 */ 3190 kthread_park(engine->breadcrumbs.signaler); 3191 3192 /* 3193 * Prevent request submission to the hardware until we have 3194 * completed the reset in i915_gem_reset_finish(). 
If a request 3195 * is completed by one engine, it may then queue a request 3196 * to a second via its engine->irq_tasklet *just* as we are 3197 * calling engine->init_hw() and also writing the ELSP. 3198 * Turning off the engine->irq_tasklet until the reset is over 3199 * prevents the race. 3200 */ 3201 tasklet_kill(&engine->execlists.irq_tasklet); 3202 tasklet_disable(&engine->execlists.irq_tasklet); 3203 3204 if (engine->irq_seqno_barrier) 3205 engine->irq_seqno_barrier(engine); 3206 3207 request = i915_gem_find_active_request(engine); 3208 if (request && request->fence.error == -EIO) 3209 request = ERR_PTR(-EIO); /* Previous reset failed! */ 3210 3211 return request; 3212 } 3213 3214 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) 3215 { 3216 struct intel_engine_cs *engine; 3217 struct drm_i915_gem_request *request; 3218 enum intel_engine_id id; 3219 int err = 0; 3220 3221 for_each_engine(engine, dev_priv, id) { 3222 request = i915_gem_reset_prepare_engine(engine); 3223 if (IS_ERR(request)) { 3224 err = PTR_ERR(request); 3225 continue; 3226 } 3227 3228 engine->hangcheck.active_request = request; 3229 } 3230 3231 i915_gem_revoke_fences(dev_priv); 3232 3233 return err; 3234 } 3235 3236 static void skip_request(struct drm_i915_gem_request *request) 3237 { 3238 void *vaddr = request->ring->vaddr; 3239 u32 head; 3240 3241 /* As this request likely depends on state from the lost 3242 * context, clear out all the user operations leaving the 3243 * breadcrumb at the end (so we get the fence notifications). 3244 */ 3245 head = request->head; 3246 if (request->postfix < head) { 3247 memset(vaddr + head, 0, request->ring->size - head); 3248 head = 0; 3249 } 3250 memset(vaddr + head, 0, request->postfix - head); 3251 3252 dma_fence_set_error(&request->fence, -EIO); 3253 } 3254 3255 static void engine_skip_context(struct drm_i915_gem_request *request) 3256 { 3257 struct intel_engine_cs *engine = request->engine; 3258 struct i915_gem_context *hung_ctx = request->ctx; 3259 struct intel_timeline *timeline; 3260 unsigned long flags; 3261 3262 timeline = i915_gem_context_lookup_timeline(hung_ctx, engine); 3263 3264 spin_lock_irqsave(&engine->timeline->lock, flags); 3265 lockmgr(&timeline->lock, LK_EXCLUSIVE); 3266 3267 list_for_each_entry_continue(request, &engine->timeline->requests, link) 3268 if (request->ctx == hung_ctx) 3269 skip_request(request); 3270 3271 list_for_each_entry(request, &timeline->requests, link) 3272 skip_request(request); 3273 3274 lockmgr(&timeline->lock, LK_RELEASE); 3275 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3276 } 3277 3278 /* Returns the request if it was guilty of the hang */ 3279 static struct drm_i915_gem_request * 3280 i915_gem_reset_request(struct intel_engine_cs *engine, 3281 struct drm_i915_gem_request *request) 3282 { 3283 /* The guilty request will get skipped on a hung engine. 3284 * 3285 * Users of client default contexts do not rely on logical 3286 * state preserved between batches so it is safe to execute 3287 * queued requests following the hang. Non default contexts 3288 * rely on preserved state, so skipping a batch loses the 3289 * evolution of the state and it needs to be considered corrupted. 3290 * Executing more queued batches on top of corrupted state is 3291 * risky. But we take the risk by trying to advance through 3292 * the queued requests in order to make the client behaviour 3293 * more predictable around resets, by not throwing away random 3294 * amount of batches it has prepared for execution. 
Sophisticated 3295 * clients can use gem_reset_stats_ioctl and dma fence status 3296 * (exported via sync_file info ioctl on explicit fences) to observe 3297 * when it loses the context state and should rebuild accordingly. 3298 * 3299 * The context ban, and ultimately the client ban, mechanism are safety 3300 * valves if client submission ends up resulting in nothing more than 3301 * subsequent hangs. 3302 */ 3303 3304 if (engine_stalled(engine)) { 3305 i915_gem_context_mark_guilty(request->ctx); 3306 skip_request(request); 3307 3308 /* If this context is now banned, skip all pending requests. */ 3309 if (i915_gem_context_is_banned(request->ctx)) 3310 engine_skip_context(request); 3311 } else { 3312 /* 3313 * Since this is not the hung engine, it may have advanced 3314 * since the hang declaration. Double check by refinding 3315 * the active request at the time of the reset. 3316 */ 3317 request = i915_gem_find_active_request(engine); 3318 if (request) { 3319 i915_gem_context_mark_innocent(request->ctx); 3320 dma_fence_set_error(&request->fence, -EAGAIN); 3321 3322 /* Rewind the engine to replay the incomplete rq */ 3323 spin_lock_irq(&engine->timeline->lock); 3324 request = list_prev_entry(request, link); 3325 if (&request->link == &engine->timeline->requests) 3326 request = NULL; 3327 spin_unlock_irq(&engine->timeline->lock); 3328 } 3329 } 3330 3331 return request; 3332 } 3333 3334 void i915_gem_reset_engine(struct intel_engine_cs *engine, 3335 struct drm_i915_gem_request *request) 3336 { 3337 engine->irq_posted = 0; 3338 3339 if (request) 3340 request = i915_gem_reset_request(engine, request); 3341 3342 if (request) { 3343 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", 3344 engine->name, request->global_seqno); 3345 } 3346 3347 /* Setup the CS to resume from the breadcrumb of the hung request */ 3348 engine->reset_hw(engine, request); 3349 } 3350 3351 void i915_gem_reset(struct drm_i915_private *dev_priv) 3352 { 3353 struct intel_engine_cs *engine; 3354 enum intel_engine_id id; 3355 3356 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3357 3358 i915_gem_retire_requests(dev_priv); 3359 3360 for_each_engine(engine, dev_priv, id) { 3361 struct i915_gem_context *ctx; 3362 3363 i915_gem_reset_engine(engine, engine->hangcheck.active_request); 3364 ctx = fetch_and_zero(&engine->last_retired_context); 3365 if (ctx) 3366 engine->context_unpin(engine, ctx); 3367 } 3368 3369 i915_gem_restore_fences(dev_priv); 3370 3371 if (dev_priv->gt.awake) { 3372 intel_sanitize_gt_powersave(dev_priv); 3373 intel_enable_gt_powersave(dev_priv); 3374 if (INTEL_GEN(dev_priv) >= 6) 3375 gen6_rps_busy(dev_priv); 3376 } 3377 } 3378 3379 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) 3380 { 3381 tasklet_enable(&engine->execlists.irq_tasklet); 3382 kthread_unpark(engine->breadcrumbs.signaler); 3383 3384 intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); 3385 } 3386 3387 void i915_gem_reset_finish(struct drm_i915_private *dev_priv) 3388 { 3389 struct intel_engine_cs *engine; 3390 enum intel_engine_id id; 3391 3392 lockdep_assert_held(&dev_priv->drm.struct_mutex); 3393 3394 for_each_engine(engine, dev_priv, id) { 3395 engine->hangcheck.active_request = NULL; 3396 i915_gem_reset_finish_engine(engine); 3397 } 3398 } 3399 3400 static void nop_submit_request(struct drm_i915_gem_request *request) 3401 { 3402 dma_fence_set_error(&request->fence, -EIO); 3403 3404 i915_gem_request_submit(request); 3405 } 3406 3407 static void nop_complete_submit_request(struct 
drm_i915_gem_request *request) 3408 { 3409 unsigned long flags; 3410 3411 dma_fence_set_error(&request->fence, -EIO); 3412 3413 spin_lock_irqsave(&request->engine->timeline->lock, flags); 3414 __i915_gem_request_submit(request); 3415 intel_engine_init_global_seqno(request->engine, request->global_seqno); 3416 spin_unlock_irqrestore(&request->engine->timeline->lock, flags); 3417 } 3418 3419 void i915_gem_set_wedged(struct drm_i915_private *i915) 3420 { 3421 struct intel_engine_cs *engine; 3422 enum intel_engine_id id; 3423 3424 /* 3425 * First, stop submission to hw, but do not yet complete requests by 3426 * rolling the global seqno forward (since this would complete requests 3427 * for which we haven't set the fence error to EIO yet). 3428 */ 3429 for_each_engine(engine, i915, id) 3430 engine->submit_request = nop_submit_request; 3431 3432 /* 3433 * Make sure no one is running the old callback before we proceed with 3434 * cancelling requests and resetting the completion tracking. Otherwise 3435 * we might submit a request to the hardware which never completes. 3436 */ 3437 synchronize_rcu(); 3438 3439 for_each_engine(engine, i915, id) { 3440 /* Mark all executing requests as skipped */ 3441 engine->cancel_requests(engine); 3442 3443 /* 3444 * Only once we've force-cancelled all in-flight requests can we 3445 * start to complete all requests. 3446 */ 3447 engine->submit_request = nop_complete_submit_request; 3448 } 3449 3450 /* 3451 * Make sure no request can slip through without getting completed by 3452 * either this call here to intel_engine_init_global_seqno, or the one 3453 * in nop_complete_submit_request. 3454 */ 3455 synchronize_rcu(); 3456 3457 for_each_engine(engine, i915, id) { 3458 unsigned long flags; 3459 3460 /* Mark all pending requests as complete so that any concurrent 3461 * (lockless) lookup doesn't try and wait upon the request as we 3462 * reset it. 3463 */ 3464 spin_lock_irqsave(&engine->timeline->lock, flags); 3465 intel_engine_init_global_seqno(engine, 3466 intel_engine_last_submit(engine)); 3467 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3468 } 3469 3470 set_bit(I915_WEDGED, &i915->gpu_error.flags); 3471 wake_up_all(&i915->gpu_error.reset_queue); 3472 } 3473 3474 bool i915_gem_unset_wedged(struct drm_i915_private *i915) 3475 { 3476 struct i915_gem_timeline *tl; 3477 int i; 3478 3479 lockdep_assert_held(&i915->drm.struct_mutex); 3480 if (!test_bit(I915_WEDGED, &i915->gpu_error.flags)) 3481 return true; 3482 3483 /* Before unwedging, make sure that all pending operations 3484 * are flushed and errored out - we may have requests waiting upon 3485 * third party fences. We marked all inflight requests as EIO, and 3486 * every execbuf since returned EIO, for consistency we want all 3487 * the currently pending requests to also be marked as EIO, which 3488 * is done inside our nop_submit_request - and so we must wait. 3489 * 3490 * No more can be submitted until we reset the wedged bit. 3491 */ 3492 list_for_each_entry(tl, &i915->gt.timelines, link) { 3493 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3494 struct drm_i915_gem_request *rq; 3495 3496 rq = i915_gem_active_peek(&tl->engine[i].last_request, 3497 &i915->drm.struct_mutex); 3498 if (!rq) 3499 continue; 3500 3501 /* We can't use our normal waiter as we want to 3502 * avoid recursively trying to handle the current 3503 * reset. 
The basic dma_fence_default_wait() installs 3504 * a callback for dma_fence_signal(), which is 3505 * triggered by our nop handler (indirectly, the 3506 * callback enables the signaler thread which is 3507 * woken by the nop_submit_request() advancing the seqno 3508 * and when the seqno passes the fence, the signaler 3509 * then signals the fence waking us up). 3510 */ 3511 if (dma_fence_default_wait(&rq->fence, true, 3512 MAX_SCHEDULE_TIMEOUT) < 0) 3513 return false; 3514 } 3515 } 3516 3517 /* Undo nop_submit_request. We prevent all new i915 requests from 3518 * being queued (by disallowing execbuf whilst wedged) so having 3519 * waited for all active requests above, we know the system is idle 3520 * and do not have to worry about a thread being inside 3521 * engine->submit_request() as we swap over. So unlike installing 3522 * the nop_submit_request on reset, we can do this from normal 3523 * context and do not require stop_machine(). 3524 */ 3525 intel_engines_reset_default_submission(i915); 3526 i915_gem_contexts_lost(i915); 3527 3528 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ 3529 clear_bit(I915_WEDGED, &i915->gpu_error.flags); 3530 3531 return true; 3532 } 3533 3534 static void 3535 i915_gem_retire_work_handler(struct work_struct *work) 3536 { 3537 struct drm_i915_private *dev_priv = 3538 container_of(work, typeof(*dev_priv), gt.retire_work.work); 3539 struct drm_device *dev = &dev_priv->drm; 3540 3541 /* Come back later if the device is busy... */ 3542 if (mutex_trylock(&dev->struct_mutex)) { 3543 i915_gem_retire_requests(dev_priv); 3544 mutex_unlock(&dev->struct_mutex); 3545 } 3546 3547 /* Keep the retire handler running until we are finally idle. 3548 * We do not need to do this test under locking as in the worst-case 3549 * we queue the retire worker once too often. 3550 */ 3551 if (READ_ONCE(dev_priv->gt.awake)) { 3552 i915_queue_hangcheck(dev_priv); 3553 queue_delayed_work(dev_priv->wq, 3554 &dev_priv->gt.retire_work, 3555 round_jiffies_up_relative(HZ)); 3556 } 3557 } 3558 3559 static void 3560 i915_gem_idle_work_handler(struct work_struct *work) 3561 { 3562 struct drm_i915_private *dev_priv = 3563 container_of(work, typeof(*dev_priv), gt.idle_work.work); 3564 struct drm_device *dev = &dev_priv->drm; 3565 bool rearm_hangcheck; 3566 3567 if (!READ_ONCE(dev_priv->gt.awake)) 3568 return; 3569 3570 /* 3571 * Wait for last execlists context complete, but bail out in case a 3572 * new request is submitted. 3573 */ 3574 wait_for(intel_engines_are_idle(dev_priv), 10); 3575 if (READ_ONCE(dev_priv->gt.active_requests)) 3576 return; 3577 3578 rearm_hangcheck = 3579 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 3580 3581 if (!mutex_trylock(&dev->struct_mutex)) { 3582 /* Currently busy, come back later */ 3583 mod_delayed_work(dev_priv->wq, 3584 &dev_priv->gt.idle_work, 3585 msecs_to_jiffies(50)); 3586 goto out_rearm; 3587 } 3588 3589 /* 3590 * New request retired after this work handler started, extend active 3591 * period until next instance of the work. 
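 * work_pending() below is true when the idle work has been queued again
 * since this instance started running, i.e. there has been activity in
 * the meantime, so we bail out and let that later instance decide
 * whether the device is really idle.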
3592 */ 3593 if (work_pending(work)) 3594 goto out_unlock; 3595 3596 if (dev_priv->gt.active_requests) 3597 goto out_unlock; 3598 3599 if (wait_for(intel_engines_are_idle(dev_priv), 10)) 3600 DRM_ERROR("Timeout waiting for engines to idle\n"); 3601 3602 intel_engines_mark_idle(dev_priv); 3603 i915_gem_timelines_mark_idle(dev_priv); 3604 3605 GEM_BUG_ON(!dev_priv->gt.awake); 3606 dev_priv->gt.awake = false; 3607 rearm_hangcheck = false; 3608 3609 if (INTEL_GEN(dev_priv) >= 6) 3610 gen6_rps_idle(dev_priv); 3611 intel_runtime_pm_put(dev_priv); 3612 out_unlock: 3613 mutex_unlock(&dev->struct_mutex); 3614 3615 out_rearm: 3616 if (rearm_hangcheck) { 3617 GEM_BUG_ON(!dev_priv->gt.awake); 3618 i915_queue_hangcheck(dev_priv); 3619 } 3620 } 3621 3622 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) 3623 { 3624 struct drm_i915_private *i915 = to_i915(gem->dev); 3625 struct drm_i915_gem_object *obj = to_intel_bo(gem); 3626 struct drm_i915_file_private *fpriv = file->driver_priv; 3627 struct i915_lut_handle *lut, *ln; 3628 3629 mutex_lock(&i915->drm.struct_mutex); 3630 3631 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) { 3632 struct i915_gem_context *ctx = lut->ctx; 3633 struct i915_vma *vma; 3634 3635 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); 3636 if (ctx->file_priv != fpriv) 3637 continue; 3638 3639 vma = radix_tree_delete(&ctx->handles_vma, lut->handle); 3640 GEM_BUG_ON(vma->obj != obj); 3641 3642 /* We allow the process to have multiple handles to the same 3643 * vma, in the same fd namespace, by virtue of flink/open. 3644 */ 3645 GEM_BUG_ON(!vma->open_count); 3646 if (!--vma->open_count && !i915_vma_is_ggtt(vma)) 3647 i915_vma_close(vma); 3648 3649 list_del(&lut->obj_link); 3650 list_del(&lut->ctx_link); 3651 3652 kmem_cache_free(i915->luts, lut); 3653 __i915_gem_object_release_unless_active(obj); 3654 } 3655 3656 mutex_unlock(&i915->drm.struct_mutex); 3657 } 3658 3659 static unsigned long to_wait_timeout(s64 timeout_ns) 3660 { 3661 if (timeout_ns < 0) 3662 return MAX_SCHEDULE_TIMEOUT; 3663 3664 if (timeout_ns == 0) 3665 return 0; 3666 3667 return nsecs_to_jiffies_timeout(timeout_ns); 3668 } 3669 3670 /** 3671 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3672 * @dev: drm device pointer 3673 * @data: ioctl data blob 3674 * @file: drm file pointer 3675 * 3676 * Returns 0 if successful, else an error is returned with the remaining time in 3677 * the timeout parameter. 3678 * -ETIME: object is still busy after timeout 3679 * -ERESTARTSYS: signal interrupted the wait 3680 * -ENONENT: object doesn't exist 3681 * Also possible, but rare: 3682 * -EAGAIN: incomplete, restart syscall 3683 * -ENOMEM: damn 3684 * -ENODEV: Internal IRQ fail 3685 * -E?: The add request failed 3686 * 3687 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any 3688 * non-zero timeout parameter the wait ioctl will wait for the given number of 3689 * nanoseconds on an object becoming unbusy. Since the wait itself does so 3690 * without holding struct_mutex the object may become re-busied before this 3691 * function completes. 
A similar but shorter race condition exists in the busy 3692 * ioctl. 3693 */ 3694 int 3695 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 3696 { 3697 struct drm_i915_gem_wait *args = data; 3698 struct drm_i915_gem_object *obj; 3699 ktime_t start; 3700 long ret; 3701 3702 if (args->flags != 0) 3703 return -EINVAL; 3704 3705 obj = i915_gem_object_lookup(file, args->bo_handle); 3706 if (!obj) 3707 return -ENOENT; 3708 3709 start = ktime_get(); 3710 3711 ret = i915_gem_object_wait(obj, 3712 I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, 3713 to_wait_timeout(args->timeout_ns), 3714 to_rps_client(file)); 3715 3716 if (args->timeout_ns > 0) { 3717 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); 3718 if (args->timeout_ns < 0) 3719 args->timeout_ns = 0; 3720
3721 /* 3722 * Apparently ktime isn't accurate enough and occasionally has a 3723 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch 3724 * things up to make the test happy. We allow up to 1 jiffy. 3725 * 3726 * This is a regression from the timespec->ktime conversion. 3727 */ 3728 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) 3729 args->timeout_ns = 0; 3730 3741 /* Asked to wait beyond the jiffie/scheduler precision? */ 3742 if (ret == -ETIME && args->timeout_ns) 3743 ret = -EAGAIN; 3744 } 3745 3746 i915_gem_object_put(obj); 3747 return ret; 3748 } 3749
3750 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) 3751 { 3752 int ret, i; 3753 3754 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) { 3755 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags); 3756 if (ret) 3757 return ret; 3758 } 3759 3760 return 0; 3761 } 3762 3763 static int wait_for_engines(struct drm_i915_private *i915) 3764 { 3765 if (wait_for(intel_engines_are_idle(i915), 50)) { 3766 DRM_ERROR("Failed to idle engines, declaring wedged!\n"); 3767 i915_gem_set_wedged(i915); 3768 return -EIO; 3769 } 3770 3771 return 0; 3772 } 3773
3774 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags) 3775 { 3776 int ret; 3777 3778 /* If the device is asleep, we have no requests outstanding */ 3779 if (!READ_ONCE(i915->gt.awake)) 3780 return 0; 3781 3782 if (flags & I915_WAIT_LOCKED) { 3783 struct i915_gem_timeline *tl; 3784 3785 lockdep_assert_held(&i915->drm.struct_mutex); 3786 3787 list_for_each_entry(tl, &i915->gt.timelines, link) { 3788 ret = wait_for_timeline(tl, flags); 3789 if (ret) 3790 return ret; 3791 } 3792 3793 i915_gem_retire_requests(i915); 3794 GEM_BUG_ON(i915->gt.active_requests); 3795 3796 ret = wait_for_engines(i915); 3797 } else { 3798 ret = wait_for_timeline(&i915->gt.global_timeline, flags); 3799 } 3800 3801 return ret; 3802 } 3803
3804 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) 3805 { 3806 /* 3807 * We manually flush the CPU domain so that we can override and 3808 * force the flush for the display, and perform it asynchronously.
3809 */ 3810 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3811 if (obj->cache_dirty) 3812 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3813 obj->base.write_domain = 0; 3814 } 3815 3816 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) 3817 { 3818 if (!READ_ONCE(obj->pin_global)) 3819 return; 3820 3821 mutex_lock(&obj->base.dev->struct_mutex); 3822 __i915_gem_object_flush_for_display(obj); 3823 mutex_unlock(&obj->base.dev->struct_mutex); 3824 } 3825 3826 /** 3827 * Moves a single object to the WC read, and possibly write domain. 3828 * @obj: object to act on 3829 * @write: ask for write access or read only 3830 * 3831 * This function returns when the move is complete, including waiting on 3832 * flushes to occur. 3833 */ 3834 int 3835 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) 3836 { 3837 int ret; 3838 3839 lockdep_assert_held(&obj->base.dev->struct_mutex); 3840 3841 ret = i915_gem_object_wait(obj, 3842 I915_WAIT_INTERRUPTIBLE | 3843 I915_WAIT_LOCKED | 3844 (write ? I915_WAIT_ALL : 0), 3845 MAX_SCHEDULE_TIMEOUT, 3846 NULL); 3847 if (ret) 3848 return ret; 3849 3850 if (obj->base.write_domain == I915_GEM_DOMAIN_WC) 3851 return 0; 3852 3853 /* Flush and acquire obj->pages so that we are coherent through 3854 * direct access in memory with previous cached writes through 3855 * shmemfs and that our cache domain tracking remains valid. 3856 * For example, if the obj->filp was moved to swap without us 3857 * being notified and releasing the pages, we would mistakenly 3858 * continue to assume that the obj remained out of the CPU cached 3859 * domain. 3860 */ 3861 ret = i915_gem_object_pin_pages(obj); 3862 if (ret) 3863 return ret; 3864 3865 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC); 3866 3867 /* Serialise direct access to this object with the barriers for 3868 * coherent writes from the GPU, by effectively invalidating the 3869 * WC domain upon first access. 3870 */ 3871 if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0) 3872 mb(); 3873 3874 /* It should now be out of any other write domains, and we can update 3875 * the domain values for our changes. 3876 */ 3877 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3878 obj->base.read_domains |= I915_GEM_DOMAIN_WC; 3879 if (write) { 3880 obj->base.read_domains = I915_GEM_DOMAIN_WC; 3881 obj->base.write_domain = I915_GEM_DOMAIN_WC; 3882 obj->mm.dirty = true; 3883 } 3884 3885 i915_gem_object_unpin_pages(obj); 3886 return 0; 3887 } 3888 3889 /** 3890 * Moves a single object to the GTT read, and possibly write domain. 3891 * @obj: object to act on 3892 * @write: ask for write access or read only 3893 * 3894 * This function returns when the move is complete, including waiting on 3895 * flushes to occur. 3896 */ 3897 int 3898 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3899 { 3900 int ret; 3901 3902 lockdep_assert_held(&obj->base.dev->struct_mutex); 3903 3904 ret = i915_gem_object_wait(obj, 3905 I915_WAIT_INTERRUPTIBLE | 3906 I915_WAIT_LOCKED | 3907 (write ? I915_WAIT_ALL : 0), 3908 MAX_SCHEDULE_TIMEOUT, 3909 NULL); 3910 if (ret) 3911 return ret; 3912 3913 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3914 return 0; 3915 3916 /* Flush and acquire obj->pages so that we are coherent through 3917 * direct access in memory with previous cached writes through 3918 * shmemfs and that our cache domain tracking remains valid. 
3919 * For example, if the obj->filp was moved to swap without us 3920 * being notified and releasing the pages, we would mistakenly 3921 * continue to assume that the obj remained out of the CPU cached 3922 * domain. 3923 */ 3924 ret = i915_gem_object_pin_pages(obj); 3925 if (ret) 3926 return ret; 3927 3928 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT); 3929 3930 /* Serialise direct access to this object with the barriers for 3931 * coherent writes from the GPU, by effectively invalidating the 3932 * GTT domain upon first access. 3933 */ 3934 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3935 mb(); 3936 3937 /* It should now be out of any other write domains, and we can update 3938 * the domain values for our changes. 3939 */ 3940 GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3941 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3942 if (write) { 3943 obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3944 obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3945 obj->mm.dirty = true; 3946 } 3947 3948 i915_gem_object_unpin_pages(obj); 3949 return 0; 3950 } 3951 3952 /** 3953 * Changes the cache-level of an object across all VMA. 3954 * @obj: object to act on 3955 * @cache_level: new cache level to set for the object 3956 * 3957 * After this function returns, the object will be in the new cache-level 3958 * across all GTT and the contents of the backing storage will be coherent, 3959 * with respect to the new cache-level. In order to keep the backing storage 3960 * coherent for all users, we only allow a single cache level to be set 3961 * globally on the object and prevent it from being changed whilst the 3962 * hardware is reading from the object. That is if the object is currently 3963 * on the scanout it will be set to uncached (or equivalent display 3964 * cache coherency) and all non-MOCS GPU access will also be uncached so 3965 * that all direct access to the scanout remains coherent. 3966 */ 3967 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 3968 enum i915_cache_level cache_level) 3969 { 3970 struct i915_vma *vma; 3971 int ret; 3972 3973 lockdep_assert_held(&obj->base.dev->struct_mutex); 3974 3975 if (obj->cache_level == cache_level) 3976 return 0; 3977 3978 /* Inspect the list of currently bound VMA and unbind any that would 3979 * be invalid given the new cache-level. This is principally to 3980 * catch the issue of the CS prefetch crossing page boundaries and 3981 * reading an invalid PTE on older architectures. 3982 */ 3983 restart: 3984 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3985 if (!drm_mm_node_allocated(&vma->node)) 3986 continue; 3987 3988 if (i915_vma_is_pinned(vma)) { 3989 DRM_DEBUG("can not change the cache level of pinned objects\n"); 3990 return -EBUSY; 3991 } 3992 3993 if (i915_gem_valid_gtt_space(vma, cache_level)) 3994 continue; 3995 3996 ret = i915_vma_unbind(vma); 3997 if (ret) 3998 return ret; 3999 4000 /* As unbinding may affect other elements in the 4001 * obj->vma_list (due to side-effects from retiring 4002 * an active vma), play safe and restart the iterator. 4003 */ 4004 goto restart; 4005 } 4006 4007 /* We can reuse the existing drm_mm nodes but need to change the 4008 * cache-level on the PTE. We could simply unbind them all and 4009 * rebind with the correct cache-level on next use. However since 4010 * we already have a valid slot, dma mapping, pages etc, we may as 4011 * rewrite the PTE in the belief that doing so tramples upon less 4012 * state and so involves less work. 
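 *
 * A typical in-kernel caller (see i915_gem_object_pin_to_display_plane()
 * later in this file) holds struct_mutex and does no more than, for
 * example (sketch only):
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		return ret;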
4013 */ 4014 if (obj->bind_count) { 4015 /* Before we change the PTE, the GPU must not be accessing it. 4016 * If we wait upon the object, we know that all the bound 4017 * VMA are no longer active. 4018 */ 4019 ret = i915_gem_object_wait(obj, 4020 I915_WAIT_INTERRUPTIBLE | 4021 I915_WAIT_LOCKED | 4022 I915_WAIT_ALL, 4023 MAX_SCHEDULE_TIMEOUT, 4024 NULL); 4025 if (ret) 4026 return ret; 4027 4028 if (!HAS_LLC(to_i915(obj->base.dev)) && 4029 cache_level != I915_CACHE_NONE) { 4030 /* Access to snoopable pages through the GTT is 4031 * incoherent and on some machines causes a hard 4032 * lockup. Relinquish the CPU mmaping to force 4033 * userspace to refault in the pages and we can 4034 * then double check if the GTT mapping is still 4035 * valid for that pointer access. 4036 */ 4037 i915_gem_release_mmap(obj); 4038 4039 /* As we no longer need a fence for GTT access, 4040 * we can relinquish it now (and so prevent having 4041 * to steal a fence from someone else on the next 4042 * fence request). Note GPU activity would have 4043 * dropped the fence as all snoopable access is 4044 * supposed to be linear. 4045 */ 4046 list_for_each_entry(vma, &obj->vma_list, obj_link) { 4047 ret = i915_vma_put_fence(vma); 4048 if (ret) 4049 return ret; 4050 } 4051 } else { 4052 /* We either have incoherent backing store and 4053 * so no GTT access or the architecture is fully 4054 * coherent. In such cases, existing GTT mmaps 4055 * ignore the cache bit in the PTE and we can 4056 * rewrite it without confusing the GPU or having 4057 * to force userspace to fault back in its mmaps. 4058 */ 4059 } 4060 4061 list_for_each_entry(vma, &obj->vma_list, obj_link) { 4062 if (!drm_mm_node_allocated(&vma->node)) 4063 continue; 4064 4065 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); 4066 if (ret) 4067 return ret; 4068 } 4069 } 4070 4071 list_for_each_entry(vma, &obj->vma_list, obj_link) 4072 vma->node.color = cache_level; 4073 i915_gem_object_set_cache_coherency(obj, cache_level); 4074 obj->cache_dirty = true; /* Always invalidate stale cachelines */ 4075 4076 return 0; 4077 } 4078 4079 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 4080 struct drm_file *file) 4081 { 4082 struct drm_i915_gem_caching *args = data; 4083 struct drm_i915_gem_object *obj; 4084 int err = 0; 4085 4086 rcu_read_lock(); 4087 obj = i915_gem_object_lookup_rcu(file, args->handle); 4088 if (!obj) { 4089 err = -ENOENT; 4090 goto out; 4091 } 4092 4093 switch (obj->cache_level) { 4094 case I915_CACHE_LLC: 4095 case I915_CACHE_L3_LLC: 4096 args->caching = I915_CACHING_CACHED; 4097 break; 4098 4099 case I915_CACHE_WT: 4100 args->caching = I915_CACHING_DISPLAY; 4101 break; 4102 4103 default: 4104 args->caching = I915_CACHING_NONE; 4105 break; 4106 } 4107 out: 4108 rcu_read_unlock(); 4109 return err; 4110 } 4111 4112 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 4113 struct drm_file *file) 4114 { 4115 struct drm_i915_private *i915 = to_i915(dev); 4116 struct drm_i915_gem_caching *args = data; 4117 struct drm_i915_gem_object *obj; 4118 enum i915_cache_level level; 4119 int ret = 0; 4120 4121 switch (args->caching) { 4122 case I915_CACHING_NONE: 4123 level = I915_CACHE_NONE; 4124 break; 4125 case I915_CACHING_CACHED: 4126 /* 4127 * Due to a HW issue on BXT A stepping, GPU stores via a 4128 * snooped mapping may leave stale data in a corresponding CPU 4129 * cacheline, whereas normally such cachelines would get 4130 * invalidated. 
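 *
 * Userspace reaches this path through the set-caching ioctl; a minimal
 * sketch (drm_fd and handle are placeholders, drmIoctl() is libdrm's
 * wrapper):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle  = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	err = drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * which the check just below rejects with -ENODEV on parts that have
 * neither an LLC nor snooping.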
4131 */ 4132 if (!HAS_LLC(i915) && !HAS_SNOOP(i915)) 4133 return -ENODEV; 4134 4135 level = I915_CACHE_LLC; 4136 break; 4137 case I915_CACHING_DISPLAY: 4138 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE; 4139 break; 4140 default: 4141 return -EINVAL; 4142 } 4143 4144 obj = i915_gem_object_lookup(file, args->handle); 4145 if (!obj) 4146 return -ENOENT; 4147 4148 if (obj->cache_level == level) 4149 goto out; 4150 4151 ret = i915_gem_object_wait(obj, 4152 I915_WAIT_INTERRUPTIBLE, 4153 MAX_SCHEDULE_TIMEOUT, 4154 to_rps_client(file)); 4155 if (ret) 4156 goto out; 4157 4158 ret = i915_mutex_lock_interruptible(dev); 4159 if (ret) 4160 goto out; 4161 4162 ret = i915_gem_object_set_cache_level(obj, level); 4163 mutex_unlock(&dev->struct_mutex); 4164 4165 out: 4166 i915_gem_object_put(obj); 4167 return ret; 4168 } 4169 4170 /* 4171 * Prepare buffer for display plane (scanout, cursors, etc). 4172 * Can be called from an uninterruptible phase (modesetting) and allows 4173 * any flushes to be pipelined (for pageflips). 4174 */ 4175 struct i915_vma * 4176 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 4177 u32 alignment, 4178 const struct i915_ggtt_view *view) 4179 { 4180 struct i915_vma *vma; 4181 int ret; 4182 4183 lockdep_assert_held(&obj->base.dev->struct_mutex); 4184 4185 /* Mark the global pin early so that we account for the 4186 * display coherency whilst setting up the cache domains. 4187 */ 4188 obj->pin_global++; 4189 4190 /* The display engine is not coherent with the LLC cache on gen6. As 4191 * a result, we make sure that the pinning that is about to occur is 4192 * done with uncached PTEs. This is lowest common denominator for all 4193 * chipsets. 4194 * 4195 * However for gen6+, we could do better by using the GFDT bit instead 4196 * of uncaching, which would allow us to flush all the LLC-cached data 4197 * with that bit in the PTE to main memory with just one PIPE_CONTROL. 4198 */ 4199 ret = i915_gem_object_set_cache_level(obj, 4200 HAS_WT(to_i915(obj->base.dev)) ? 4201 I915_CACHE_WT : I915_CACHE_NONE); 4202 if (ret) { 4203 vma = ERR_PTR(ret); 4204 goto err_unpin_global; 4205 } 4206 4207 /* As the user may map the buffer once pinned in the display plane 4208 * (e.g. libkms for the bootup splash), we have to ensure that we 4209 * always use map_and_fenceable for all scanout buffers. However, 4210 * it may simply be too big to fit into mappable, in which case 4211 * put it anyway and hope that userspace can cope (but always first 4212 * try to preserve the existing ABI). 4213 */ 4214 vma = ERR_PTR(-ENOSPC); 4215 if (!view || view->type == I915_GGTT_VIEW_NORMAL) 4216 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 4217 PIN_MAPPABLE | PIN_NONBLOCK); 4218 if (IS_ERR(vma)) { 4219 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4220 unsigned int flags; 4221 4222 /* Valleyview is definitely limited to scanning out the first 4223 * 512MiB. Lets presume this behaviour was inherited from the 4224 * g4x display engine and that all earlier gen are similarly 4225 * limited. Testing suggests that it is a little more 4226 * complicated than this. For example, Cherryview appears quite 4227 * happy to scanout from anywhere within its global aperture. 
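 *
 * The expected calling pattern for this pair of helpers, under
 * struct_mutex (sketch only):
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... scan out from vma ...
 *	i915_gem_object_unpin_from_display_plane(vma);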
4228 */ 4229 flags = 0; 4230 if (HAS_GMCH_DISPLAY(i915)) 4231 flags = PIN_MAPPABLE; 4232 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 4233 } 4234 if (IS_ERR(vma)) 4235 goto err_unpin_global; 4236 4237 vma->display_alignment = max_t(u64, vma->display_alignment, alignment); 4238 4239 /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ 4240 __i915_gem_object_flush_for_display(obj); 4241 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 4242 4243 /* It should now be out of any other write domains, and we can update 4244 * the domain values for our changes. 4245 */ 4246 obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 4247 4248 return vma; 4249 4250 err_unpin_global: 4251 obj->pin_global--; 4252 return vma; 4253 } 4254 4255 void 4256 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) 4257 { 4258 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 4259 4260 if (WARN_ON(vma->obj->pin_global == 0)) 4261 return; 4262 4263 if (--vma->obj->pin_global == 0) 4264 vma->display_alignment = I915_GTT_MIN_ALIGNMENT; 4265 4266 /* Bump the LRU to try and avoid premature eviction whilst flipping */ 4267 i915_gem_object_bump_inactive_ggtt(vma->obj); 4268 4269 i915_vma_unpin(vma); 4270 } 4271 4272 /** 4273 * Moves a single object to the CPU read, and possibly write domain. 4274 * @obj: object to act on 4275 * @write: requesting write or read-only access 4276 * 4277 * This function returns when the move is complete, including waiting on 4278 * flushes to occur. 4279 */ 4280 int 4281 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) 4282 { 4283 int ret; 4284 4285 lockdep_assert_held(&obj->base.dev->struct_mutex); 4286 4287 ret = i915_gem_object_wait(obj, 4288 I915_WAIT_INTERRUPTIBLE | 4289 I915_WAIT_LOCKED | 4290 (write ? I915_WAIT_ALL : 0), 4291 MAX_SCHEDULE_TIMEOUT, 4292 NULL); 4293 if (ret) 4294 return ret; 4295 4296 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 4297 4298 /* Flush the CPU cache if it's still invalid. */ 4299 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4300 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4301 obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4302 } 4303 4304 /* It should now be out of any other write domains, and we can update 4305 * the domain values for our changes. 4306 */ 4307 GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 4308 4309 /* If we're writing through the CPU, then the GPU read domains will 4310 * need to be invalidated at next use. 4311 */ 4312 if (write) 4313 __start_cpu_write(obj); 4314 4315 return 0; 4316 } 4317 4318 /* Throttle our rendering by waiting until the ring has completed our requests 4319 * emitted over 20 msec ago. 4320 * 4321 * Note that if we were to use the current jiffies each time around the loop, 4322 * we wouldn't escape the function with any frames outstanding if the time to 4323 * render a frame was over 20ms. 4324 * 4325 * This should get us reasonable parallelism between CPU and GPU but also 4326 * relatively low latency when blocking on a particular request to finish. 
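 *
 * Userspace triggers this via the argument-less throttle ioctl; a
 * minimal sketch (drm_fd is a placeholder, drmIoctl() is libdrm's
 * wrapper). It blocks until the newest of the caller's requests that
 * is at least DRM_I915_THROTTLE_JIFFIES (~20ms) old has completed:
 *
 *	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
 *		err = -errno;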
4327 */ 4328 static int 4329 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4330 { 4331 struct drm_i915_private *dev_priv = to_i915(dev); 4332 struct drm_i915_file_private *file_priv = file->driver_priv; 4333 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4334 struct drm_i915_gem_request *request, *target = NULL; 4335 long ret; 4336 4337 /* ABI: return -EIO if already wedged */ 4338 if (i915_terminally_wedged(&dev_priv->gpu_error)) 4339 return -EIO; 4340 4341 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 4342 list_for_each_entry(request, &file_priv->mm.request_list, client_link) { 4343 if (time_after_eq(request->emitted_jiffies, recent_enough)) 4344 break; 4345 4346 if (target) { 4347 list_del(&target->client_link); 4348 target->file_priv = NULL; 4349 } 4350 4351 target = request; 4352 } 4353 if (target) 4354 i915_gem_request_get(target); 4355 lockmgr(&file_priv->mm.lock, LK_RELEASE); 4356 4357 if (target == NULL) 4358 return 0; 4359 4360 ret = i915_wait_request(target, 4361 I915_WAIT_INTERRUPTIBLE, 4362 MAX_SCHEDULE_TIMEOUT); 4363 i915_gem_request_put(target); 4364 4365 return ret < 0 ? ret : 0; 4366 } 4367 4368 struct i915_vma * 4369 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, 4370 const struct i915_ggtt_view *view, 4371 u64 size, 4372 u64 alignment, 4373 u64 flags) 4374 { 4375 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 4376 struct i915_address_space *vm = &dev_priv->ggtt.base; 4377 struct i915_vma *vma; 4378 int ret; 4379 4380 lockdep_assert_held(&obj->base.dev->struct_mutex); 4381 4382 if (!view && flags & PIN_MAPPABLE) { 4383 /* If the required space is larger than the available 4384 * aperture, we will not be able to find a slot for the 4385 * object and unbinding the object now will be in 4386 * vain. Worse, doing so may cause us to ping-pong 4387 * the object in and out of the Global GTT and 4388 * waste a lot of cycles under the mutex. 4389 */ 4390 if (obj->base.size > dev_priv->ggtt.mappable_end) 4391 return ERR_PTR(-E2BIG); 4392 4393 /* If NONBLOCK is set the caller is optimistically 4394 * trying to cache the full object within the mappable 4395 * aperture, and *must* have a fallback in place for 4396 * situations where we cannot bind the object. We 4397 * can be a little more lax here and use the fallback 4398 * more often to avoid costly migrations of ourselves 4399 * and other objects within the aperture. 4400 * 4401 * Half-the-aperture is used as a simple heuristic. 4402 * More interesting would be to do a search for a free 4403 * block prior to making the commitment to unbind. 4404 * That caters for the self-harm case, and with a 4405 * little more heuristics (e.g. NOFAULT, NOEVICT) 4406 * we could try to minimise harm to others.
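 *
 * Concretely, with illustrative numbers: for a 256MiB mappable
 * aperture, a 192MiB object pinned with PIN_MAPPABLE | PIN_NONBLOCK
 * exceeds mappable_end / 2 and is refused with -ENOSPC just below,
 * so the caller takes its fallback path instead of evicting most of
 * the aperture; without PIN_NONBLOCK the pin proceeds and may evict.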
4407 */ 4408 if (flags & PIN_NONBLOCK && 4409 obj->base.size > dev_priv->ggtt.mappable_end / 2) 4410 return ERR_PTR(-ENOSPC); 4411 } 4412 4413 vma = i915_vma_instance(obj, vm, view); 4414 if (unlikely(IS_ERR(vma))) 4415 return vma; 4416 4417 if (i915_vma_misplaced(vma, size, alignment, flags)) { 4418 if (flags & PIN_NONBLOCK) { 4419 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) 4420 return ERR_PTR(-ENOSPC); 4421 4422 if (flags & PIN_MAPPABLE && 4423 vma->fence_size > dev_priv->ggtt.mappable_end / 2) 4424 return ERR_PTR(-ENOSPC); 4425 } 4426 4427 WARN(i915_vma_is_pinned(vma), 4428 "bo is already pinned in ggtt with incorrect alignment:" 4429 " offset=%08x, req.alignment=%llx," 4430 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", 4431 i915_ggtt_offset(vma), alignment, 4432 !!(flags & PIN_MAPPABLE), 4433 i915_vma_is_map_and_fenceable(vma)); 4434 ret = i915_vma_unbind(vma); 4435 if (ret) 4436 return ERR_PTR(ret); 4437 } 4438 4439 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); 4440 if (ret) 4441 return ERR_PTR(ret); 4442 4443 return vma; 4444 } 4445 4446 static __always_inline unsigned int __busy_read_flag(unsigned int id) 4447 { 4448 /* Note that we could alias engines in the execbuf API, but 4449 * that would be very unwise as it prevents userspace from 4450 * fine control over engine selection. Ahem. 4451 * 4452 * This should be something like EXEC_MAX_ENGINE instead of 4453 * I915_NUM_ENGINES. 4454 */ 4455 BUILD_BUG_ON(I915_NUM_ENGINES > 16); 4456 return 0x10000 << id; 4457 } 4458 4459 static __always_inline unsigned int __busy_write_id(unsigned int id) 4460 { 4461 /* The uABI guarantees an active writer is also amongst the read 4462 * engines. This would be true if we accessed the activity tracking 4463 * under the lock, but as we perform the lookup of the object and 4464 * its activity locklessly we can not guarantee that the last_write 4465 * being active implies that we have set the same engine flag from 4466 * last_read - hence we always set both read and write busy for 4467 * last_write. 4468 */ 4469 return id | __busy_read_flag(id); 4470 } 4471 4472 #pragma GCC diagnostic push 4473 #pragma GCC diagnostic ignored "-Wdiscarded-qualifiers" 4474 4475 static __always_inline unsigned int 4476 __busy_set_if_active(const struct dma_fence *fence, 4477 unsigned int (*flag)(unsigned int id)) 4478 { 4479 struct drm_i915_gem_request *rq; 4480 4481 /* We have to check the current hw status of the fence as the uABI 4482 * guarantees forward progress. We could rely on the idle worker 4483 * to eventually flush us, but to minimise latency just ask the 4484 * hardware. 4485 * 4486 * Note we only report on the status of native fences. 
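 *
 * The resulting encoding, as seen through the busy ioctl below: the
 * low 16 bits carry the uabi id of the engine holding the exclusive
 * (write) fence (see __busy_write_id()), the high 16 bits form a
 * bitmask of engines holding shared (read) fences (see
 * __busy_read_flag()). A hedged userspace sketch, with drm_fd and
 * handle as placeholders and drmIoctl() from libdrm:
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 *	writer_id   = busy.busy & 0xffff;
 *	reader_mask = busy.busy >> 16;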
4487 */ 4488 if (!dma_fence_is_i915(fence)) 4489 return 0; 4490 4491 /* opencode to_request() in order to avoid const warnings */ 4492 rq = container_of(fence, struct drm_i915_gem_request, fence); 4493 if (i915_gem_request_completed(rq)) 4494 return 0; 4495 4496 return flag(rq->engine->uabi_id); 4497 } 4498 #pragma GCC diagnostic pop 4499 4500 static __always_inline unsigned int 4501 busy_check_reader(const struct dma_fence *fence) 4502 { 4503 return __busy_set_if_active(fence, __busy_read_flag); 4504 } 4505 4506 static __always_inline unsigned int 4507 busy_check_writer(const struct dma_fence *fence) 4508 { 4509 if (!fence) 4510 return 0; 4511 4512 return __busy_set_if_active(fence, __busy_write_id); 4513 } 4514 4515 int 4516 i915_gem_busy_ioctl(struct drm_device *dev, void *data, 4517 struct drm_file *file) 4518 { 4519 struct drm_i915_gem_busy *args = data; 4520 struct drm_i915_gem_object *obj; 4521 struct reservation_object_list *list; 4522 unsigned int seq; 4523 int err; 4524 4525 err = -ENOENT; 4526 rcu_read_lock(); 4527 obj = i915_gem_object_lookup_rcu(file, args->handle); 4528 if (!obj) 4529 goto out; 4530 4531 /* A discrepancy here is that we do not report the status of 4532 * non-i915 fences, i.e. even though we may report the object as idle, 4533 * a call to set-domain may still stall waiting for foreign rendering. 4534 * This also means that wait-ioctl may report an object as busy, 4535 * where busy-ioctl considers it idle. 4536 * 4537 * We trade the ability to warn of foreign fences to report on which 4538 * i915 engines are active for the object. 4539 * 4540 * Alternatively, we can trade that extra information on read/write 4541 * activity with 4542 * args->busy = 4543 * !reservation_object_test_signaled_rcu(obj->resv, true); 4544 * to report the overall busyness. This is what the wait-ioctl does. 
4545 * 4546 */ 4547 retry: 4548 seq = raw_read_seqcount(&obj->resv->seq); 4549 4550 /* Translate the exclusive fence to the READ *and* WRITE engine */ 4551 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl)); 4552 4553 /* Translate shared fences to READ set of engines */ 4554 list = rcu_dereference(obj->resv->fence); 4555 if (list) { 4556 unsigned int shared_count = list->shared_count, i; 4557 4558 for (i = 0; i < shared_count; ++i) { 4559 struct dma_fence *fence = 4560 rcu_dereference(list->shared[i]); 4561 4562 args->busy |= busy_check_reader(fence); 4563 } 4564 } 4565 4566 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq)) 4567 goto retry; 4568 4569 err = 0; 4570 out: 4571 rcu_read_unlock(); 4572 return err; 4573 } 4574 4575 int 4576 i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 4577 struct drm_file *file_priv) 4578 { 4579 return i915_gem_ring_throttle(dev, file_priv); 4580 } 4581 4582 int 4583 i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4584 struct drm_file *file_priv) 4585 { 4586 struct drm_i915_private *dev_priv = to_i915(dev); 4587 struct drm_i915_gem_madvise *args = data; 4588 struct drm_i915_gem_object *obj; 4589 int err; 4590 4591 switch (args->madv) { 4592 case I915_MADV_DONTNEED: 4593 case I915_MADV_WILLNEED: 4594 break; 4595 default: 4596 return -EINVAL; 4597 } 4598 4599 obj = i915_gem_object_lookup(file_priv, args->handle); 4600 if (!obj) 4601 return -ENOENT; 4602 4603 err = mutex_lock_interruptible(&obj->mm.lock); 4604 if (err) 4605 goto out; 4606 4607 if (i915_gem_object_has_pages(obj) && 4608 i915_gem_object_is_tiled(obj) && 4609 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 4610 if (obj->mm.madv == I915_MADV_WILLNEED) { 4611 GEM_BUG_ON(!obj->mm.quirked); 4612 __i915_gem_object_unpin_pages(obj); 4613 obj->mm.quirked = false; 4614 } 4615 if (args->madv == I915_MADV_WILLNEED) { 4616 GEM_BUG_ON(obj->mm.quirked); 4617 __i915_gem_object_pin_pages(obj); 4618 obj->mm.quirked = true; 4619 } 4620 } 4621 4622 if (obj->mm.madv != __I915_MADV_PURGED) 4623 obj->mm.madv = args->madv; 4624 4625 /* if the object is no longer attached, discard its backing storage */ 4626 if (obj->mm.madv == I915_MADV_DONTNEED && 4627 !i915_gem_object_has_pages(obj)) 4628 i915_gem_object_truncate(obj); 4629 4630 args->retained = obj->mm.madv != __I915_MADV_PURGED; 4631 mutex_unlock(&obj->mm.lock); 4632 4633 out: 4634 i915_gem_object_put(obj); 4635 return err; 4636 } 4637 4638 static void 4639 frontbuffer_retire(struct i915_gem_active *active, 4640 struct drm_i915_gem_request *request) 4641 { 4642 struct drm_i915_gem_object *obj = 4643 container_of(active, typeof(*obj), frontbuffer_write); 4644 4645 intel_fb_obj_flush(obj, ORIGIN_CS); 4646 } 4647 4648 void i915_gem_object_init(struct drm_i915_gem_object *obj, 4649 const struct drm_i915_gem_object_ops *ops) 4650 { 4651 lockinit(&obj->mm.lock, "i9goml", 0, LK_CANRECURSE); 4652 4653 INIT_LIST_HEAD(&obj->vma_list); 4654 INIT_LIST_HEAD(&obj->lut_list); 4655 INIT_LIST_HEAD(&obj->batch_pool_link); 4656 4657 obj->ops = ops; 4658 4659 reservation_object_init(&obj->__builtin_resv); 4660 obj->resv = &obj->__builtin_resv; 4661 4662 obj->frontbuffer_ggtt_origin = ORIGIN_GTT; 4663 init_request_active(&obj->frontbuffer_write, frontbuffer_retire); 4664 4665 obj->mm.madv = I915_MADV_WILLNEED; 4666 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); 4667 lockinit(&obj->mm.get_page.lock, "i915ogpl", 0, LK_CANRECURSE); 4668 4669 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); 4670 } 4671 
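/*
 * For reference, i915_gem_madvise_ioctl() above is driven from userspace
 * roughly as follows (a minimal sketch; drm_fd and handle are
 * placeholders, drmIoctl() is libdrm's wrapper). DONTNEED lets the
 * shrinker purge the backing store; when the buffer is later marked
 * WILLNEED again, retained reports whether the contents survived:
 *
 *	struct drm_i915_gem_madvise arg = {
 *		.handle = handle,
 *		.madv   = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	...
 *	arg.madv = I915_MADV_WILLNEED;
 *	drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		... regenerate the purged contents ...
 */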
4672 static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4673 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | 4674 I915_GEM_OBJECT_IS_SHRINKABLE, 4675 4676 .get_pages = i915_gem_object_get_pages_gtt, 4677 .put_pages = i915_gem_object_put_pages_gtt, 4678 4679 .pwrite = i915_gem_object_pwrite_gtt, 4680 }; 4681 4682 static int i915_gem_object_create_shmem(struct drm_device *dev, 4683 struct drm_gem_object *obj, 4684 size_t size) 4685 { 4686 #ifndef __DragonFly__ 4687 struct drm_i915_private *i915 = to_i915(dev); 4688 unsigned long flags = VM_NORESERVE; 4689 struct file *filp; 4690 #endif 4691 4692 #ifndef __DragonFly__ 4693 drm_gem_private_object_init(dev, obj, size); 4694 #else 4695 drm_gem_object_init(dev, obj, size); 4696 #endif 4697 4698 #ifndef __DragonFly__ 4699 if (i915->mm.gemfs) 4700 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, 4701 flags); 4702 else 4703 filp = shmem_file_setup("i915", size, flags); 4704 4705 if (IS_ERR(filp)) 4706 return PTR_ERR(filp); 4707 4708 obj->filp = filp; 4709 #endif 4710 4711 return 0; 4712 } 4713 4714 struct drm_i915_gem_object * 4715 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) 4716 { 4717 struct drm_i915_gem_object *obj; 4718 #if 0 4719 struct address_space *mapping; 4720 #endif 4721 unsigned int cache_level; 4722 gfp_t mask; 4723 int ret; 4724 4725 /* There is a prevalence of the assumption that we fit the object's 4726 * page count inside a 32bit _signed_ variable. Let's document this and 4727 * catch if we ever need to fix it. In the meantime, if you do spot 4728 * such a local variable, please consider fixing! 4729 */ 4730 if (size >> PAGE_SHIFT > INT_MAX) 4731 return ERR_PTR(-E2BIG); 4732 4733 if (overflows_type(size, obj->base.size)) 4734 return ERR_PTR(-E2BIG); 4735 4736 obj = i915_gem_object_alloc(dev_priv); 4737 if (obj == NULL) 4738 return ERR_PTR(-ENOMEM); 4739 4740 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size); 4741 if (ret) 4742 goto fail; 4743 4744 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4745 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) { 4746 /* 965gm cannot relocate objects above 4GiB. */ 4747 mask &= ~__GFP_HIGHMEM; 4748 mask |= __GFP_DMA32; 4749 } 4750 4751 #if 0 4752 mapping = obj->base.filp->f_mapping; 4753 mapping_set_gfp_mask(mapping, mask); 4754 #endif 4755 4756 i915_gem_object_init(obj, &i915_gem_object_ops); 4757 4758 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4759 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4760 4761 if (HAS_LLC(dev_priv)) 4762 /* On some devices, we can have the GPU use the LLC (the CPU 4763 * cache) for about a 10% performance improvement 4764 * compared to uncached. Graphics requests other than 4765 * display scanout are coherent with the CPU in 4766 * accessing this cache. This means in this mode we 4767 * don't need to clflush on the CPU side, and on the 4768 * GPU side we only need to flush internal caches to 4769 * get data visible to the CPU. 4770 * 4771 * However, we maintain the display planes as UC, and so 4772 * need to rebind when first used as such. 
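 *
 * Callers of i915_gem_object_create() use the usual ERR_PTR idiom,
 * e.g. (sketch only, cf. i915_gem_object_create_from_data() later in
 * this file):
 *
 *	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);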
4773 */ 4774 cache_level = I915_CACHE_LLC; 4775 else 4776 cache_level = I915_CACHE_NONE; 4777 4778 i915_gem_object_set_cache_coherency(obj, cache_level); 4779 4780 trace_i915_gem_object_create(obj); 4781 4782 return obj; 4783 4784 fail: 4785 i915_gem_object_free(obj); 4786 return ERR_PTR(ret); 4787 } 4788 4789 static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4790 { 4791 /* If we are the last user of the backing storage (be it shmemfs 4792 * pages or stolen etc), we know that the pages are going to be 4793 * immediately released. In this case, we can then skip copying 4794 * back the contents from the GPU. 4795 */ 4796 4797 if (obj->mm.madv != I915_MADV_WILLNEED) 4798 return false; 4799 4800 if (obj->base.filp == NULL) 4801 return true; 4802 4803 /* At first glance, this looks racy, but then again so would be 4804 * userspace racing mmap against close. However, the first external 4805 * reference to the filp can only be obtained through the 4806 * i915_gem_mmap_ioctl() which safeguards us against the user 4807 * acquiring such a reference whilst we are in the middle of 4808 * freeing the object. 4809 */ 4810 #if 0 4811 return atomic_long_read(&obj->base.filp->f_count) == 1; 4812 #else 4813 return false; 4814 #endif 4815 } 4816 4817 static void __i915_gem_free_objects(struct drm_i915_private *i915, 4818 struct llist_node *freed) 4819 { 4820 struct drm_i915_gem_object *obj, *on; 4821 4822 intel_runtime_pm_get(i915); 4823 llist_for_each_entry_safe(obj, on, freed, freed) { 4824 struct i915_vma *vma, *vn; 4825 4826 trace_i915_gem_object_destroy(obj); 4827 4828 mutex_lock(&i915->drm.struct_mutex); 4829 4830 GEM_BUG_ON(i915_gem_object_is_active(obj)); 4831 list_for_each_entry_safe(vma, vn, 4832 &obj->vma_list, obj_link) { 4833 GEM_BUG_ON(i915_vma_is_active(vma)); 4834 vma->flags &= ~I915_VMA_PIN_MASK; 4835 i915_vma_close(vma); 4836 } 4837 GEM_BUG_ON(!list_empty(&obj->vma_list)); 4838 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree)); 4839 4840 /* This serializes freeing with the shrinker. Since the free 4841 * is delayed, first by RCU then by the workqueue, we want the 4842 * shrinker to be able to free pages of unreferenced objects, 4843 * or else we may oom whilst there are plenty of deferred 4844 * freed objects. 
4845 */ 4846 if (i915_gem_object_has_pages(obj)) { 4847 lockmgr(&i915->mm.obj_lock, LK_EXCLUSIVE); 4848 list_del_init(&obj->mm.link); 4849 lockmgr(&i915->mm.obj_lock, LK_RELEASE); 4850 } 4851 4852 mutex_unlock(&i915->drm.struct_mutex); 4853 4854 GEM_BUG_ON(obj->bind_count); 4855 GEM_BUG_ON(obj->userfault_count); 4856 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); 4857 GEM_BUG_ON(!list_empty(&obj->lut_list)); 4858 4859 if (obj->ops->release) 4860 obj->ops->release(obj); 4861 4862 #if 0 4863 if (WARN_ON(i915_gem_object_has_pinned_pages(obj))) 4864 #else 4865 if (i915_gem_object_has_pinned_pages(obj)) 4866 #endif 4867 atomic_set(&obj->mm.pages_pin_count, 0); 4868 __i915_gem_object_put_pages(obj, I915_MM_NORMAL); 4869 GEM_BUG_ON(i915_gem_object_has_pages(obj)); 4870 4871 if (obj->base.import_attach) 4872 drm_prime_gem_destroy(&obj->base, NULL); 4873 4874 reservation_object_fini(&obj->__builtin_resv); 4875 drm_gem_object_release(&obj->base); 4876 i915_gem_info_remove_obj(i915, obj->base.size); 4877 4878 kfree(obj->bit_17); 4879 i915_gem_object_free(obj); 4880 4881 if (on) 4882 cond_resched(); 4883 } 4884 intel_runtime_pm_put(i915); 4885 } 4886 4887 static void i915_gem_flush_free_objects(struct drm_i915_private *i915) 4888 { 4889 struct llist_node *freed; 4890 4891 /* Free the oldest, most stale object to keep the free_list short */ 4892 freed = NULL; 4893 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ 4894 /* Only one consumer of llist_del_first() allowed */ 4895 lockmgr(&i915->mm.free_lock, LK_EXCLUSIVE); 4896 freed = llist_del_first(&i915->mm.free_list); 4897 lockmgr(&i915->mm.free_lock, LK_RELEASE); 4898 } 4899 if (unlikely(freed)) { 4900 freed->next = NULL; 4901 __i915_gem_free_objects(i915, freed); 4902 } 4903 } 4904 4905 static void __i915_gem_free_work(struct work_struct *work) 4906 { 4907 struct drm_i915_private *i915 = 4908 container_of(work, struct drm_i915_private, mm.free_work); 4909 struct llist_node *freed; 4910 4911 /* All file-owned VMA should have been released by this point through 4912 * i915_gem_close_object(), or earlier by i915_gem_context_close(). 4913 * However, the object may also be bound into the global GTT (e.g. 4914 * older GPUs without per-process support, or for direct access through 4915 * the GTT either for the user or for scanout). Those VMA still need to 4916 * unbound now. 4917 */ 4918 4919 lockmgr(&i915->mm.free_lock, LK_EXCLUSIVE); 4920 while ((freed = llist_del_all(&i915->mm.free_list))) { 4921 lockmgr(&i915->mm.free_lock, LK_RELEASE); 4922 4923 __i915_gem_free_objects(i915, freed); 4924 if (need_resched()) 4925 return; 4926 4927 lockmgr(&i915->mm.free_lock, LK_EXCLUSIVE); 4928 } 4929 lockmgr(&i915->mm.free_lock, LK_RELEASE); 4930 } 4931 4932 static void __i915_gem_free_object_rcu(struct rcu_head *head) 4933 { 4934 struct drm_i915_gem_object *obj = 4935 container_of(head, typeof(*obj), rcu); 4936 struct drm_i915_private *i915 = to_i915(obj->base.dev); 4937 4938 /* We can't simply use call_rcu() from i915_gem_free_object() 4939 * as we need to block whilst unbinding, and the call_rcu 4940 * task may be called from softirq context. So we take a 4941 * detour through a worker. 
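 *
 * The complete deferred-free pipeline, as implemented by the code in
 * this file, is therefore:
 *
 *	i915_gem_free_object()
 *	    -> call_rcu(&obj->rcu, __i915_gem_free_object_rcu)
 *	        -> llist_add(&obj->freed, &i915->mm.free_list)
 *	        -> schedule_work(&i915->mm.free_work)
 *	            -> __i915_gem_free_work()
 *	                -> __i915_gem_free_objects()
 *
 * with i915_gem_flush_free_objects() opportunistically draining one
 * entry from the same list on other paths.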
4942 */ 4943 if (llist_add(&obj->freed, &i915->mm.free_list)) 4944 schedule_work(&i915->mm.free_work); 4945 } 4946 4947 void i915_gem_free_object(struct drm_gem_object *gem_obj) 4948 { 4949 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4950 4951 if (obj->mm.quirked) 4952 __i915_gem_object_unpin_pages(obj); 4953 4954 if (discard_backing_storage(obj)) 4955 obj->mm.madv = I915_MADV_DONTNEED; 4956 4957 /* Before we free the object, make sure any pure RCU-only 4958 * read-side critical sections are complete, e.g. 4959 * i915_gem_busy_ioctl(). For the corresponding synchronized 4960 * lookup see i915_gem_object_lookup_rcu(). 4961 */ 4962 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4963 } 4964 4965 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj) 4966 { 4967 lockdep_assert_held(&obj->base.dev->struct_mutex); 4968 4969 if (!i915_gem_object_has_active_reference(obj) && 4970 i915_gem_object_is_active(obj)) 4971 i915_gem_object_set_active_reference(obj); 4972 else 4973 i915_gem_object_put(obj); 4974 } 4975 4976 static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) 4977 { 4978 struct intel_engine_cs *engine; 4979 enum intel_engine_id id; 4980 4981 for_each_engine(engine, dev_priv, id) 4982 GEM_BUG_ON(engine->last_retired_context && 4983 !i915_gem_context_is_kernel(engine->last_retired_context)); 4984 } 4985 4986 void i915_gem_sanitize(struct drm_i915_private *i915) 4987 { 4988 if (i915_terminally_wedged(&i915->gpu_error)) { 4989 mutex_lock(&i915->drm.struct_mutex); 4990 i915_gem_unset_wedged(i915); 4991 mutex_unlock(&i915->drm.struct_mutex); 4992 } 4993 4994 /* 4995 * If we inherit context state from the BIOS or earlier occupants 4996 * of the GPU, the GPU may be in an inconsistent state when we 4997 * try to take over. The only way to remove the earlier state 4998 * is by resetting. However, resetting on earlier gen is tricky as 4999 * it may impact the display and we are uncertain about the stability 5000 * of the reset, so this could be applied to even earlier gen. 5001 */ 5002 if (INTEL_GEN(i915) >= 5) { 5003 int reset = intel_gpu_reset(i915, ALL_ENGINES); 5004 WARN_ON(reset && reset != -ENODEV); 5005 } 5006 } 5007 5008 int i915_gem_suspend(struct drm_i915_private *dev_priv) 5009 { 5010 struct drm_device *dev = &dev_priv->drm; 5011 int ret; 5012 5013 intel_runtime_pm_get(dev_priv); 5014 intel_suspend_gt_powersave(dev_priv); 5015 5016 mutex_lock(&dev->struct_mutex); 5017 5018 /* We have to flush all the executing contexts to main memory so 5019 * that they can saved in the hibernation image. To ensure the last 5020 * context image is coherent, we have to switch away from it. That 5021 * leaves the dev_priv->kernel_context still active when 5022 * we actually suspend, and its image in memory may not match the GPU 5023 * state. Fortunately, the kernel_context is disposable and we do 5024 * not rely on its state. 
5025 */ 5026 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 5027 ret = i915_gem_switch_to_kernel_context(dev_priv); 5028 if (ret) 5029 goto err_unlock; 5030 5031 ret = i915_gem_wait_for_idle(dev_priv, 5032 I915_WAIT_INTERRUPTIBLE | 5033 I915_WAIT_LOCKED); 5034 if (ret && ret != -EIO) 5035 goto err_unlock; 5036 5037 assert_kernel_context_is_current(dev_priv); 5038 } 5039 i915_gem_contexts_lost(dev_priv); 5040 mutex_unlock(&dev->struct_mutex); 5041 5042 intel_guc_suspend(dev_priv); 5043 5044 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 5045 cancel_delayed_work_sync(&dev_priv->gt.retire_work); 5046 5047 /* As the idle_work is rearming if it detects a race, play safe and 5048 * repeat the flush until it is definitely idle. 5049 */ 5050 drain_delayed_work(&dev_priv->gt.idle_work); 5051 5052 /* Assert that we successfully flushed all the work and 5053 * reset the GPU back to its idle, low power state. 5054 */ 5055 WARN_ON(dev_priv->gt.awake); 5056 if (WARN_ON(!intel_engines_are_idle(dev_priv))) 5057 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */ 5058 5059 /* 5060 * Neither the BIOS, ourselves or any other kernel 5061 * expects the system to be in execlists mode on startup, 5062 * so we need to reset the GPU back to legacy mode. And the only 5063 * known way to disable logical contexts is through a GPU reset. 5064 * 5065 * So in order to leave the system in a known default configuration, 5066 * always reset the GPU upon unload and suspend. Afterwards we then 5067 * clean up the GEM state tracking, flushing off the requests and 5068 * leaving the system in a known idle state. 5069 * 5070 * Note that it is of the utmost importance that the GPU is idle and 5071 * all stray writes are flushed *before* we dismantle the backing 5072 * storage for the pinned objects. 5073 * 5074 * However, since we are uncertain that resetting the GPU on older 5075 * machines is a good idea, we don't - just in case it leaves the 5076 * machine in an unusable condition. 5077 */ 5078 i915_gem_sanitize(dev_priv); 5079 5080 intel_runtime_pm_put(dev_priv); 5081 return 0; 5082 5083 err_unlock: 5084 mutex_unlock(&dev->struct_mutex); 5085 intel_runtime_pm_put(dev_priv); 5086 return ret; 5087 } 5088 5089 void i915_gem_resume(struct drm_i915_private *dev_priv) 5090 { 5091 struct drm_device *dev = &dev_priv->drm; 5092 5093 WARN_ON(dev_priv->gt.awake); 5094 5095 mutex_lock(&dev->struct_mutex); 5096 i915_gem_restore_gtt_mappings(dev_priv); 5097 i915_gem_restore_fences(dev_priv); 5098 5099 /* As we didn't flush the kernel context before suspend, we cannot 5100 * guarantee that the context image is complete. So let's just reset 5101 * it and start again.
5102 */ 5103 dev_priv->gt.resume(dev_priv); 5104 5105 mutex_unlock(&dev->struct_mutex); 5106 } 5107 5108 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) 5109 { 5110 if (INTEL_GEN(dev_priv) < 5 || 5111 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 5112 return; 5113 5114 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 5115 DISP_TILE_SURFACE_SWIZZLING); 5116 5117 if (IS_GEN5(dev_priv)) 5118 return; 5119 5120 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 5121 if (IS_GEN6(dev_priv)) 5122 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 5123 else if (IS_GEN7(dev_priv)) 5124 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 5125 else if (IS_GEN8(dev_priv)) 5126 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); 5127 else 5128 BUG(); 5129 } 5130 5131 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) 5132 { 5133 I915_WRITE(RING_CTL(base), 0); 5134 I915_WRITE(RING_HEAD(base), 0); 5135 I915_WRITE(RING_TAIL(base), 0); 5136 I915_WRITE(RING_START(base), 0); 5137 } 5138 5139 static void init_unused_rings(struct drm_i915_private *dev_priv) 5140 { 5141 if (IS_I830(dev_priv)) { 5142 init_unused_ring(dev_priv, PRB1_BASE); 5143 init_unused_ring(dev_priv, SRB0_BASE); 5144 init_unused_ring(dev_priv, SRB1_BASE); 5145 init_unused_ring(dev_priv, SRB2_BASE); 5146 init_unused_ring(dev_priv, SRB3_BASE); 5147 } else if (IS_GEN2(dev_priv)) { 5148 init_unused_ring(dev_priv, SRB0_BASE); 5149 init_unused_ring(dev_priv, SRB1_BASE); 5150 } else if (IS_GEN3(dev_priv)) { 5151 init_unused_ring(dev_priv, PRB1_BASE); 5152 init_unused_ring(dev_priv, PRB2_BASE); 5153 } 5154 } 5155 5156 static int __i915_gem_restart_engines(void *data) 5157 { 5158 struct drm_i915_private *i915 = data; 5159 struct intel_engine_cs *engine; 5160 enum intel_engine_id id; 5161 int err; 5162 5163 for_each_engine(engine, i915, id) { 5164 err = engine->init_hw(engine); 5165 if (err) 5166 return err; 5167 } 5168 5169 return 0; 5170 } 5171 5172 int i915_gem_init_hw(struct drm_i915_private *dev_priv) 5173 { 5174 int ret; 5175 5176 dev_priv->gt.last_init_time = ktime_get(); 5177 5178 /* Double layer security blanket, see i915_gem_init() */ 5179 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5180 5181 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) 5182 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); 5183 5184 if (IS_HASWELL(dev_priv)) 5185 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? 5186 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); 5187 5188 if (HAS_PCH_NOP(dev_priv)) { 5189 if (IS_IVYBRIDGE(dev_priv)) { 5190 u32 temp = I915_READ(GEN7_MSG_CTL); 5191 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); 5192 I915_WRITE(GEN7_MSG_CTL, temp); 5193 } else if (INTEL_GEN(dev_priv) >= 7) { 5194 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); 5195 temp &= ~RESET_PCH_HANDSHAKE_ENABLE; 5196 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); 5197 } 5198 } 5199 5200 i915_gem_init_swizzling(dev_priv); 5201 5202 /* 5203 * At least 830 can leave some of the unused rings 5204 * "active" (ie. head != tail) after resume which 5205 * will prevent c3 entry. Makes sure all unused rings 5206 * are totally idle. 
5207 */ 5208 init_unused_rings(dev_priv); 5209 5210 BUG_ON(!dev_priv->kernel_context); 5211 if (i915_terminally_wedged(&dev_priv->gpu_error)) { 5212 ret = -EIO; 5213 goto out; 5214 } 5215 5216 ret = i915_ppgtt_init_hw(dev_priv); 5217 if (ret) { 5218 DRM_ERROR("PPGTT enable HW failed %d\n", ret); 5219 goto out; 5220 } 5221 5222 /* Need to do basic initialisation of all rings first: */ 5223 ret = __i915_gem_restart_engines(dev_priv); 5224 if (ret) 5225 goto out; 5226 5227 intel_mocs_init_l3cc_table(dev_priv); 5228 5229 /* We can't enable contexts until all firmware is loaded */ 5230 ret = intel_uc_init_hw(dev_priv); 5231 if (ret) 5232 goto out; 5233 5234 out: 5235 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5236 return ret; 5237 } 5238 5239 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) 5240 { 5241 if (INTEL_INFO(dev_priv)->gen < 6) 5242 return false; 5243 5244 /* TODO: make semaphores and Execlists play nicely together */ 5245 if (i915_modparams.enable_execlists) 5246 return false; 5247 5248 if (value >= 0) 5249 return value; 5250 5251 /* Enable semaphores on SNB when IO remapping is off */ 5252 if (IS_GEN6(dev_priv) && intel_vtd_active()) 5253 return false; 5254 5255 return true; 5256 } 5257 5258 int i915_gem_init(struct drm_i915_private *dev_priv) 5259 { 5260 int ret; 5261 5262 /* 5263 * We need to fallback to 4K pages since gvt gtt handling doesn't 5264 * support huge page entries - we will need to check either hypervisor 5265 * mm can support huge guest page or just do emulation in gvt. 5266 */ 5267 if (intel_vgpu_active(dev_priv)) 5268 mkwrite_device_info(dev_priv)->page_sizes = 5269 I915_GTT_PAGE_SIZE_4K; 5270 5271 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); 5272 5273 if (!i915_modparams.enable_execlists) { 5274 dev_priv->gt.resume = intel_legacy_submission_resume; 5275 dev_priv->gt.cleanup_engine = intel_engine_cleanup; 5276 } else { 5277 dev_priv->gt.resume = intel_lr_context_resume; 5278 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; 5279 } 5280 5281 ret = i915_gem_init_userptr(dev_priv); 5282 if (ret) 5283 return ret; 5284 5285 /* This is just a security blanket to placate dragons. 5286 * On some systems, we very sporadically observe that the first TLBs 5287 * used by the CS may be stale, despite us poking the TLB reset. If 5288 * we hold the forcewake during initialisation these problems 5289 * just magically go away. 5290 */ 5291 mutex_lock(&dev_priv->drm.struct_mutex); 5292 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5293 5294 ret = i915_gem_init_ggtt(dev_priv); 5295 if (ret) 5296 goto out_unlock; 5297 5298 ret = i915_gem_contexts_init(dev_priv); 5299 if (ret) 5300 goto out_unlock; 5301 5302 ret = intel_engines_init(dev_priv); 5303 if (ret) 5304 goto out_unlock; 5305 5306 ret = i915_gem_init_hw(dev_priv); 5307 if (ret == -EIO) { 5308 /* Allow engine initialisation to fail by marking the GPU as 5309 * wedged. But we only want to do this where the GPU is angry, 5310 * for all other failure, such as an allocation failure, bail. 
5311 */ 5312 if (!i915_terminally_wedged(&dev_priv->gpu_error)) { 5313 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n"); 5314 i915_gem_set_wedged(dev_priv); 5315 } 5316 ret = 0; 5317 } 5318 5319 out_unlock: 5320 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5321 mutex_unlock(&dev_priv->drm.struct_mutex); 5322 5323 return ret; 5324 } 5325 5326 void i915_gem_init_mmio(struct drm_i915_private *i915) 5327 { 5328 i915_gem_sanitize(i915); 5329 } 5330 5331 void 5332 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv) 5333 { 5334 struct intel_engine_cs *engine; 5335 enum intel_engine_id id; 5336 5337 for_each_engine(engine, dev_priv, id) 5338 dev_priv->gt.cleanup_engine(engine); 5339 } 5340 5341 void 5342 i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5343 { 5344 int i; 5345 5346 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5347 !IS_CHERRYVIEW(dev_priv)) 5348 dev_priv->num_fence_regs = 32; 5349 else if (INTEL_INFO(dev_priv)->gen >= 4 || 5350 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5351 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5352 dev_priv->num_fence_regs = 16; 5353 else 5354 dev_priv->num_fence_regs = 8; 5355 5356 if (intel_vgpu_active(dev_priv)) 5357 dev_priv->num_fence_regs = 5358 I915_READ(vgtif_reg(avail_rs.fence_num)); 5359 5360 /* Initialize fence registers to zero */ 5361 for (i = 0; i < dev_priv->num_fence_regs; i++) { 5362 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; 5363 5364 fence->i915 = dev_priv; 5365 fence->id = i; 5366 list_add_tail(&fence->link, &dev_priv->mm.fence_list); 5367 } 5368 i915_gem_restore_fences(dev_priv); 5369 5370 i915_gem_detect_bit_6_swizzle(dev_priv); 5371 } 5372 5373 int 5374 i915_gem_load_init(struct drm_i915_private *dev_priv) 5375 { 5376 int err = -ENOMEM; 5377 5378 dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); 5379 if (!dev_priv->objects) 5380 goto err_out; 5381 5382 dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); 5383 if (!dev_priv->vmas) 5384 goto err_objects; 5385 5386 dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0); 5387 if (!dev_priv->luts) 5388 goto err_vmas; 5389 5390 dev_priv->requests = KMEM_CACHE(drm_i915_gem_request, 5391 SLAB_HWCACHE_ALIGN | 5392 SLAB_RECLAIM_ACCOUNT | 5393 SLAB_TYPESAFE_BY_RCU); 5394 if (!dev_priv->requests) 5395 goto err_luts; 5396 5397 dev_priv->dependencies = KMEM_CACHE(i915_dependency, 5398 SLAB_HWCACHE_ALIGN | 5399 SLAB_RECLAIM_ACCOUNT); 5400 if (!dev_priv->dependencies) 5401 goto err_requests; 5402 5403 dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); 5404 if (!dev_priv->priorities) 5405 goto err_dependencies; 5406 5407 mutex_lock(&dev_priv->drm.struct_mutex); 5408 INIT_LIST_HEAD(&dev_priv->gt.timelines); 5409 err = i915_gem_timeline_init__global(dev_priv); 5410 mutex_unlock(&dev_priv->drm.struct_mutex); 5411 if (err) 5412 goto err_priorities; 5413 5414 INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work); 5415 5416 lockinit(&dev_priv->mm.obj_lock, "i9dpmmo", 0, 0); 5417 lockinit(&dev_priv->mm.free_lock, "i9dpmmf", 0, 0); 5418 init_llist_head(&dev_priv->mm.free_list); 5419 INIT_LIST_HEAD(&dev_priv->mm.unbound_list); 5420 INIT_LIST_HEAD(&dev_priv->mm.bound_list); 5421 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5422 INIT_LIST_HEAD(&dev_priv->mm.userfault_list); 5423 5424 INIT_DELAYED_WORK(&dev_priv->gt.retire_work, 5425 i915_gem_retire_work_handler); 5426 INIT_DELAYED_WORK(&dev_priv->gt.idle_work, 5427 i915_gem_idle_work_handler); 5428 
init_waitqueue_head(&dev_priv->gpu_error.wait_queue); 5429 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5430 5431 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); 5432 5433 lockinit(&dev_priv->fb_tracking.lock, "drmftl", 0, 0); 5434 5435 return 0; 5436 5437 err_priorities: 5438 kmem_cache_destroy(dev_priv->priorities); 5439 err_dependencies: 5440 kmem_cache_destroy(dev_priv->dependencies); 5441 err_requests: 5442 kmem_cache_destroy(dev_priv->requests); 5443 err_luts: 5444 kmem_cache_destroy(dev_priv->luts); 5445 err_vmas: 5446 kmem_cache_destroy(dev_priv->vmas); 5447 err_objects: 5448 kmem_cache_destroy(dev_priv->objects); 5449 err_out: 5450 return err; 5451 } 5452 5453 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5454 { 5455 i915_gem_drain_freed_objects(dev_priv); 5456 WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5457 WARN_ON(dev_priv->mm.object_count); 5458 5459 mutex_lock(&dev_priv->drm.struct_mutex); 5460 i915_gem_timeline_fini(&dev_priv->gt.global_timeline); 5461 WARN_ON(!list_empty(&dev_priv->gt.timelines)); 5462 mutex_unlock(&dev_priv->drm.struct_mutex); 5463 5464 kmem_cache_destroy(dev_priv->priorities); 5465 kmem_cache_destroy(dev_priv->dependencies); 5466 kmem_cache_destroy(dev_priv->requests); 5467 kmem_cache_destroy(dev_priv->luts); 5468 kmem_cache_destroy(dev_priv->vmas); 5469 kmem_cache_destroy(dev_priv->objects); 5470 5471 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ 5472 rcu_barrier(); 5473 5474 #if 0 5475 i915_gemfs_fini(dev_priv); 5476 #endif 5477 } 5478 5479 int i915_gem_freeze(struct drm_i915_private *dev_priv) 5480 { 5481 /* Discard all purgeable objects, let userspace recover those as 5482 * required after resuming. 5483 */ 5484 i915_gem_shrink_all(dev_priv); 5485 5486 return 0; 5487 } 5488 5489 int i915_gem_freeze_late(struct drm_i915_private *dev_priv) 5490 { 5491 struct drm_i915_gem_object *obj; 5492 struct list_head *phases[] = { 5493 &dev_priv->mm.unbound_list, 5494 &dev_priv->mm.bound_list, 5495 NULL 5496 }, **p; 5497 5498 /* Called just before we write the hibernation image. 5499 * 5500 * We need to update the domain tracking to reflect that the CPU 5501 * will be accessing all the pages to create and restore from the 5502 * hibernation, and so upon restoration those pages will be in the 5503 * CPU domain. 5504 * 5505 * To make sure the hibernation image contains the latest state, 5506 * we update that state just before writing out the image. 5507 * 5508 * To try and reduce the hibernation image, we manually shrink 5509 * the objects as well, see i915_gem_freeze() 5510 */ 5511 5512 i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND); 5513 i915_gem_drain_freed_objects(dev_priv); 5514 5515 lockmgr(&dev_priv->mm.obj_lock, LK_EXCLUSIVE); 5516 for (p = phases; *p; p++) { 5517 list_for_each_entry(obj, *p, mm.link) 5518 __start_cpu_write(obj); 5519 } 5520 lockmgr(&dev_priv->mm.obj_lock, LK_RELEASE); 5521 5522 return 0; 5523 } 5524 5525 void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5526 { 5527 struct drm_i915_file_private *file_priv = file->driver_priv; 5528 struct drm_i915_gem_request *request; 5529 5530 /* Clean up our request list when the client is going away, so that 5531 * later retire_requests won't dereference our soon-to-be-gone 5532 * file_priv. 
5533 */ 5534 lockmgr(&file_priv->mm.lock, LK_EXCLUSIVE); 5535 list_for_each_entry(request, &file_priv->mm.request_list, client_link) 5536 request->file_priv = NULL; 5537 lockmgr(&file_priv->mm.lock, LK_RELEASE); 5538 } 5539 5540 #ifdef __DragonFly__ 5541 int 5542 i915_gem_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 5543 vm_ooffset_t foff, struct ucred *cred, u_short *color) 5544 { 5545 *color = 0; /* XXXKIB */ 5546 return (0); 5547 } 5548 5549 void 5550 i915_gem_pager_dtor(void *handle) 5551 { 5552 struct drm_gem_object *obj = handle; 5553 struct drm_device *dev = obj->dev; 5554 5555 drm_gem_free_mmap_offset(obj); 5556 mutex_lock(&dev->struct_mutex); 5557 i915_gem_release_mmap(to_intel_bo(obj)); 5558 drm_gem_object_unreference(obj); 5559 mutex_unlock(&dev->struct_mutex); 5560 } 5561 #endif 5562 5563 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) 5564 { 5565 struct drm_i915_file_private *file_priv; 5566 int ret; 5567 5568 DRM_DEBUG("\n"); 5569 5570 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 5571 if (!file_priv) 5572 return -ENOMEM; 5573 5574 file->driver_priv = file_priv; 5575 file_priv->dev_priv = i915; 5576 file_priv->file = file; 5577 5578 lockinit(&file_priv->mm.lock, "i915_priv", 0, 0); 5579 INIT_LIST_HEAD(&file_priv->mm.request_list); 5580 5581 file_priv->bsd_engine = -1; 5582 5583 ret = i915_gem_context_open(i915, file); 5584 if (ret) 5585 kfree(file_priv); 5586 5587 return ret; 5588 } 5589 5590 /** 5591 * i915_gem_track_fb - update frontbuffer tracking 5592 * @old: current GEM buffer for the frontbuffer slots 5593 * @new: new GEM buffer for the frontbuffer slots 5594 * @frontbuffer_bits: bitmask of frontbuffer slots 5595 * 5596 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them 5597 * from @old and setting them in @new. Both @old and @new can be NULL. 5598 */ 5599 void i915_gem_track_fb(struct drm_i915_gem_object *old, 5600 struct drm_i915_gem_object *new, 5601 unsigned frontbuffer_bits) 5602 { 5603 /* Control of individual bits within the mask are guarded by 5604 * the owning plane->mutex, i.e. we can never see concurrent 5605 * manipulation of individual bits. But since the bitfield as a whole 5606 * is updated using RMW, we need to use atomics in order to update 5607 * the bits. 
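 *
 * A typical caller flips a plane from one framebuffer object to
 * another and passes that plane's slot mask, e.g. (sketch only;
 * plane_frontbuffer_bits is a placeholder for the plane's bits):
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, plane_frontbuffer_bits);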
5608 */ 5609 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 5610 sizeof(atomic_t) * BITS_PER_BYTE); 5611 5612 if (old) { 5613 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); 5614 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits); 5615 } 5616 5617 if (new) { 5618 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits); 5619 atomic_or(frontbuffer_bits, &new->frontbuffer_bits); 5620 } 5621 } 5622 5623 /* Allocate a new GEM object and fill it with the supplied data */ 5624 struct drm_i915_gem_object * 5625 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, 5626 const void *data, size_t size) 5627 { 5628 struct drm_i915_gem_object *obj; 5629 struct vm_object *file; 5630 size_t offset; 5631 int err; 5632 5633 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE)); 5634 if (IS_ERR(obj)) 5635 return obj; 5636 5637 GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5638 5639 file = obj->base.filp; 5640 offset = 0; 5641 do { 5642 unsigned int len = min_t(typeof(size), size, PAGE_SIZE); 5643 struct page *page; 5644 void *pgdata, *vaddr; 5645 5646 err = pagecache_write_begin(file, NULL, 5647 offset, len, 0, 5648 &page, &pgdata); 5649 if (err < 0) 5650 goto fail; 5651 5652 vaddr = kmap(page); 5653 memcpy(vaddr, data, len); 5654 kunmap(page); 5655 5656 #ifndef __DragonFly__ 5657 err = pagecache_write_end(file, file->f_mapping, 5658 #else 5659 err = pagecache_write_end(file, NULL, 5660 #endif 5661 offset, len, len, 5662 page, pgdata); 5663 if (err < 0) 5664 goto fail; 5665 5666 size -= len; 5667 data += len; 5668 offset += len; 5669 } while (size); 5670 5671 return obj; 5672 5673 fail: 5674 i915_gem_object_put(obj); 5675 return ERR_PTR(err); 5676 } 5677 5678 struct scatterlist * 5679 i915_gem_object_get_sg(struct drm_i915_gem_object *obj, 5680 unsigned int n, 5681 unsigned int *offset) 5682 { 5683 struct i915_gem_object_page_iter *iter = &obj->mm.get_page; 5684 struct scatterlist *sg; 5685 unsigned int idx, count; 5686 5687 might_sleep(); 5688 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT); 5689 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 5690 5691 /* As we iterate forward through the sg, we record each entry in a 5692 * radixtree for quick repeated (backwards) lookups. If we have seen 5693 * this index previously, we will have an entry for it. 5694 * 5695 * Initial lookup is O(N), but this is amortized to O(1) for 5696 * sequential page access (where each new request is consecutive 5697 * to the previous one). Repeated lookups are O(lg(obj->base.size)), 5698 * i.e. O(1) with a large constant! 5699 */ 5700 if (n < READ_ONCE(iter->sg_idx)) 5701 goto lookup; 5702 5703 mutex_lock(&iter->lock); 5704 5705 /* We prefer to reuse the last sg so that repeated lookup of this 5706 * (or the subsequent) sg are fast - comparing against the last 5707 * sg is faster than going through the radixtree. 5708 */ 5709 5710 sg = iter->sg_pos; 5711 idx = iter->sg_idx; 5712 count = __sg_page_count(sg); 5713 5714 while (idx + count <= n) { 5715 unsigned long exception, i; 5716 int ret; 5717 5718 /* If we cannot allocate and insert this entry, or the 5719 * individual pages from this range, cancel updating the 5720 * sg_idx so that on this lookup we are forced to linearly 5721 * scan onwards, but on future lookups we will try the 5722 * insertion again (in which case we need to be careful of 5723 * the error return reporting that we have already inserted 5724 * this index). 
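 *
 * Worked example with illustrative indices: an sg chunk covering pages
 * 8..23 is recorded as radix[8] = sg, with radix[9..23] holding
 * exceptional entries that encode the base index 8. A later lookup of
 * page 13 hits such an exceptional entry, re-looks up radix[8] for the
 * real sg, and returns *offset = 13 - 8 = 5 (see the lookup path at
 * the end of this function).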

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
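
/*
 * Illustrative sketch (not part of the driver build): walking an object's
 * backing store page by page. A monotonically increasing index is the case
 * the iter->sg_pos/sg_idx cache in i915_gem_object_get_sg() is optimised
 * for (amortised O(1) per page), and the pages must remain pinned for the
 * whole walk. The helper name is hypothetical.
 */
#if 0
static int example_walk_pages(struct drm_i915_gem_object *obj)
{
	unsigned long n, npages = obj->base.size >> PAGE_SHIFT;
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	for (n = 0; n < npages; n++) {
		struct page *page = i915_gem_object_get_page(obj, n);
		dma_addr_t dma = i915_gem_object_get_dma_address(obj, n);

		/* inspect or map page / dma here */
		(void)page;
		(void)dma;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}
#endif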

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_object_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = fetch_and_zero(&obj->mm.pages);
	if (pages) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		__i915_gem_object_reset_page_iter(obj);

		lockmgr(&i915->mm.obj_lock, LK_EXCLUSIVE);
		list_del(&obj->mm.link);
		lockmgr(&i915->mm.obj_lock, LK_RELEASE);
	}

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_object_ops;
	obj->mm.pages = pages;
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif