/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

#ifdef __linux__
static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
#endif

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 * it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	vaddr_t addr;
	vsize_t size;
	int ret;

#ifdef __OpenBSD__
	if (args->size == 0 || args->offset & PAGE_MASK)
		return -EINVAL;
	size = round_page(args->size);
	if (args->offset + size < args->offset)
		return -EINVAL;
#endif

	/*
	 * mmap ioctl is disallowed for all discrete platforms,
	 * and for all platforms with GRAPHICS_VER > 12.
	 */
	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !pat_enabled())
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
#ifdef __linux__
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}
#else
	if (!obj->base.uao) {
		addr = -ENXIO;
		goto err;
	}
#endif

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

#ifdef __linux__
	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);
#else
	addr = 0;
	uao_reference(obj->base.uao);
	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
	    obj->base.uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
	    PROT_READ | PROT_WRITE, MAP_INHERIT_SHARE, MADV_RANDOM,
	    (args->flags & I915_MMAP_WC) ? UVM_FLAG_WC : 0));
	if (ret != 0)
		uao_detach(obj->base.uao);
	i915_gem_object_put(obj);
	if (ret)
		return ret;
#endif

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
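
/*
 * Illustration only (not part of the driver): a minimal sketch of the
 * "modern" userspace path recommended in the kernel-doc above, i.e. asking
 * the kernel for a fake offset via DRM_IOCTL_I915_GEM_MMAP_OFFSET and then
 * calling mmap() on the DRM fd itself. Error handling is omitted, and the
 * fd/handle/size/ptr variables are assumed to be provided by the caller.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */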

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom: we would ping-pong between two
 *     objects that could not fit inside the GTT and so the memcpy would
 *     page one object in at the expense of the other between every single
 *     byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. Doing so can cause
 *    machine hangs on some architectures, corruption on others. An attempt
 *    to service a GTT page fault from a snoopable object will generate a
 *    SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

static inline struct i915_gtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_gtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GTT_VIEW_NORMAL;

	return view;
}
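
/*
 * Worked example (illustrative numbers only): with MIN_CHUNK_PAGES = 256
 * (1 MiB of 4 KiB pages), an untiled 16 MiB object (4096 pages) and a fault
 * at page_offset = 1000, the partial view becomes
 *
 *	partial.offset = rounddown(1000, 256)     = 768
 *	partial.size   = min(256, 4096 - 768)     = 256
 *
 * i.e. a 1 MiB window starting 3 MiB into the object. For tiled objects the
 * chunk is first rounded up to a whole number of tile rows so that the view
 * remains suitable for fenced access.
 */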

#ifdef __linux__

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM: /* our allocation failure */
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ENOBUFS: /* temporarily out of fences? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static void set_address_limits(struct vm_area_struct *area,
			       struct i915_vma *vma,
			       unsigned long obj_offset,
			       unsigned long *start_vaddr,
			       unsigned long *end_vaddr)
{
	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
	long start, end; /* memory boundaries */

	/*
	 * Let's move into the ">> PAGE_SHIFT"
	 * domain to be sure not to lose bits
	 */
	vm_start = area->vm_start >> PAGE_SHIFT;
	vm_end = area->vm_end >> PAGE_SHIFT;
	vma_size = vma->size >> PAGE_SHIFT;

	/*
	 * Calculate the memory boundaries by considering the offset
	 * provided by the user during memory mapping and the offset
	 * provided for the partial mapping.
	 */
	start = vm_start;
	start -= obj_offset;
	start += vma->gtt_view.partial.offset;
	end = start + vma_size;

	start = max_t(long, start, vm_start);
	end = min_t(long, end, vm_end);

	/* Let's move back into the "<< PAGE_SHIFT" domain */
	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	unsigned long obj_offset;
	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned long pfn;
	int srcu;
	int ret;

	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
	page_offset += obj_offset;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	/*
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by the set_pat extension, coherency is managed by userspace, so
	 * make sure we don't fail handling the vm fault by calling
	 * i915_gem_object_has_cache_level(), which always returns true for
	 * such objects. Otherwise this helper function would fall back to
	 * checking whether the object is un-cached.
	 */
	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
	      HAS_LLC(i915))) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	set_address_limits(area, vma, obj_offset, &start, &end);

	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
	pfn += (start - area->vm_start) >> PAGE_SHIFT;
	pfn += obj_offset - vma->gtt_view.partial.offset;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (range_overflows_t(u64, addr, len, obj->base.size))
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

#else /* !__linux__ */

static int i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return EACCES;

	case -ENOMEM: /* our allocation failure */
		return EACCES; /* XXX */

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ENOBUFS: /* temporarily out of fences? */
	case -ERESTART:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return 0;
	}
}

static int
vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
	     vm_prot_t access_type)
{
	struct vm_map_entry *entry = ufi->entry;
	struct drm_i915_gem_object *obj = mmo->obj;
	int write = !!(access_type & PROT_WRITE);
	struct sg_table *pages;
	struct sg_page_iter sg_iter;
	vm_prot_t mapprot;
	vaddr_t va = entry->start;
	paddr_t pa, pa_flags = 0;
	int flags;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
		return EACCES;
	}

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return EACCES;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	flags = mapprot = entry->protection;
	if (write == 0)
		flags &= ~PROT_WRITE;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		pa_flags |= PMAP_WC;
		break;
	case I915_MMAP_TYPE_UC:
		pa_flags |= PMAP_NOCACHE;
		break;
	default:
		break;
	}

	pages = obj->mm.pages;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		pa = sg_page_iter_dma_address(&sg_iter);
		if (pmap_enter(ufi->orig_map->pmap, va, pa | pa_flags,
		    mapprot, PMAP_CANFAIL | flags)) {
			err = -ENOMEM;
			break;
		}
		va += PAGE_SIZE;
	}
	pmap_update(ufi->orig_map->pmap);

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
	return i915_error_to_vmf_fault(err);
}

int
remap_io_mapping(pmap_t pm, vm_prot_t mapprot,
    vaddr_t va, unsigned long pfn, unsigned long size)
{
	vaddr_t end = va + size;
	paddr_t pa = ptoa(pfn);

	while (va < end) {
		if (pmap_enter(pm, va, pa | PMAP_WC, mapprot, PMAP_CANFAIL | mapprot))
			return -ENOMEM;
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return 0;
}

static void set_address_limits(struct vm_map_entry *entry,
			       struct i915_vma *vma,
			       unsigned long obj_offset,
			       unsigned long *start_vaddr,
			       unsigned long *end_vaddr)
{
	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
	long start, end; /* memory boundaries */

	/*
	 * Let's move into the ">> PAGE_SHIFT"
	 * domain to be sure not to lose bits
	 */
	vm_start = entry->start >> PAGE_SHIFT;
	vm_end = entry->end >> PAGE_SHIFT;
	vma_size = vma->size >> PAGE_SHIFT;

	/*
	 * Calculate the memory boundaries by considering the offset
	 * provided by the user during memory mapping and the offset
	 * provided for the partial mapping.
	 */
	start = vm_start;
	start -= obj_offset;
	start += vma->gtt_view.partial.offset;
	end = start + vma_size;

	start = max_t(long, start, vm_start);
	end = min_t(long, end, vm_end);

	/* Let's move back into the "<< PAGE_SHIFT" domain */
	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
}

static int
vm_fault_gtt(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
    vaddr_t vaddr, vm_prot_t access_type)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_map_entry *entry = ufi->entry;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	int write = !!(access_type & PROT_WRITE);
	struct i915_gem_ww_ctx ww;
	unsigned long obj_offset;
	unsigned long start, end; /* memory boundaries */
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned long pfn;
	int srcu;
	int ret;

	obj_offset = (entry->offset >> PAGE_SHIFT) - drm_vma_node_start(&mmo->vma_node);
	page_offset = (vaddr - entry->start) >> PAGE_SHIFT;
	page_offset += obj_offset;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_gtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/*
		 * The entire mappable GGTT is pinned? Unexpected!
		 * Try to evict the object we locked too, as normally we skip it
		 * due to lack of short term pinning inside execbuf.
		 */
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
				goto err_reset;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	/*
	 * For objects created by userspace through GEM_CREATE with pat_index
	 * set by the set_pat extension, coherency is managed by userspace, so
	 * make sure we don't fail handling the vm fault by calling
	 * i915_gem_object_has_cache_level(), which always returns true for
	 * such objects. Otherwise this helper function would fall back to
	 * checking whether the object is un-cached.
	 */
	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
	      HAS_LLC(i915))) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	set_address_limits(entry, vma, obj_offset, &start, &end);

	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
	pfn += (start - entry->start) >> PAGE_SHIFT;
	pfn += obj_offset - vma->gtt_view.partial.offset;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(ufi->orig_map->pmap, entry->protection,
	    start, pfn, end - start);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
	return i915_error_to_vmf_fault(ret);
}

int
i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
    off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
    vm_prot_t access_type, int flags)
{
	struct drm_vma_offset_node *node;
	struct drm_device *dev = gem_obj->dev;
	struct vm_map_entry *entry = ufi->entry;
	vsize_t size = entry->end - entry->start;
	struct i915_mmap_offset *mmo = NULL;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  entry->offset >> PAGE_SHIFT,
						  size >> PAGE_SHIFT);
	if (likely(node))
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	if (!mmo) {
		uvmfault_unlockall(ufi, NULL, &gem_obj->uobj);
		return EACCES;
	}

	KASSERT(gem_obj == &mmo->obj->base);

	if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
		return vm_fault_gtt(mmo, ufi, vaddr, access_type);

	return vm_fault_cpu(mmo, ufi, access_type);
}

#endif /* !__linux__ */

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by vm_fault_gtt().
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates this somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be sufficient,
	 * an extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_device *bdev = bo->bdev;

#ifdef __linux__
	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
#endif

	/*
	 * We have exclusive access here via runtime suspend. All other callers
	 * must first grab the rpm wakeref.
	 */
	GEM_BUG_ON(!obj->userfault_count);
	list_del(&obj->userfault_link);
	obj->userfault_count = 0;
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	if (obj->ops->unmap_virtual)
		obj->ops->unmap_virtual(obj);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
#ifdef __linux__
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
#endif
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow_once(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	struct drm_i915_private *i915 = to_i915(dev);
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (pat_enabled())
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}
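
/*
 * Illustration only (not part of the driver): the dumb-buffer helper above
 * is what backs the generic KMS mapping sequence in userspace, roughly
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *
 * with the fd/handle/size/ptr variables assumed to come from the caller.
 * The mmap type chosen above (FIXED, WC or GTT) only affects how the
 * resulting pages are cached and mapped, not the userspace sequence itself.
 */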

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verboten!
	 */

	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!pat_enabled())
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

#ifdef __linux__

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

static int
i915_gem_object_mmap(struct drm_i915_gem_object *obj,
		     struct i915_mmap_offset *mmo,
		     struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct file *anon;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vm_flags_clear(vma, VM_MAYWRITE);
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	vma_set_file(vma, anon);
	/* Drop the initial creation reference, the vma is now holding one. */
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = obj->base.vma_node.driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	return i915_gem_object_mmap(obj, mmo, vma);
}

#else /* !__linux__ */

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
struct uvm_object *
i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
    voff_t off, vsize_t size)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = (void *)filp;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;

	if (drm_dev_is_unplugged(dev))
		return NULL;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as they are in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return NULL;

	if (i915_gem_object_is_readonly(obj)) {
		if (accessprot & PROT_WRITE) {
			i915_gem_object_put(obj);
			return NULL;
		}
	}

	if (obj->ops->mmap_ops)
		uvm_obj_init(&obj->base.uobj, obj->ops->mmap_ops, 1);

	return &obj->base.uobj;
}

#endif /* !__linux__ */

#ifdef notyet
int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_device *dev = &i915->drm;
	struct i915_mmap_offset *mmo = NULL;
	enum i915_mmap_type mmap_type;
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	/* handle ttm object */
	if (obj->ops->mmap_ops) {
		/*
		 * ttm fault handler, ttm_bo_vm_fault_reserved() uses fake offset
		 * to calculate page offset so set that up.
		 */
		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
	} else {
		/* handle stolen and smem objects */
		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
		mmo = mmap_offset_attach(obj, mmap_type, NULL);
		if (IS_ERR(mmo))
			return PTR_ERR(mmo);

		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
	}

	/*
	 * When we install vm_ops for mmap we are too late for
	 * the vm_ops->open() which increases the ref_count of
	 * this obj and then it gets decreased by the vm_ops->close().
	 * To balance this increase the obj ref_count here.
	 */
	obj = i915_gem_object_get(obj);
	return i915_gem_object_mmap(obj, mmo, vma);
}
#endif /* notyet */

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif