/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

static void wait_rendering(struct drm_i915_gem_object *obj)
{
	unsigned long active = __I915_BO_ACTIVE(obj);
	int idx;

	for_each_active(active, idx)
		i915_gem_active_wait_unlocked(&obj->last_read[idx],
					      false, NULL, NULL);
}

static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	wait_rendering(obj);

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		/* We are inside a kthread context and can't be interrupted */
		WARN_ON(i915_gem_object_unbind(obj));
		WARN_ON(i915_gem_object_put_pages(obj));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
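/*
 * mmu_notifier callback: a range of the process address space is about to
 * be invalidated (e.g. on munmap). Queue cancellation of every userptr
 * object overlapping [start, end), detach those objects from the interval
 * tree, and wait for the cancel workers to complete.
 */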
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), M_DRM, GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}
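/*
 * Hook a new userptr object into the mmu_notifier machinery: unless the
 * object was created unsynchronized (root only), allocate an interval-tree
 * record spanning the user range so that the invalidate callback above can
 * find and cancel the object. The record is only inserted into the tree
 * once the object first acquires pages (see __i915_gem_userptr_set_active).
 */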
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}
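/*
 * NOTE: the i915_mm_struct allocation and hashing below is still stubbed
 * out in this port (see the #if 0 blocks), so userptr object creation
 * currently fails in i915_gem_userptr_init__mm_struct() with -ENOMEM.
 */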
static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
#if 0
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), M_DRM, GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
#else
	/* mm_struct tracking is not wired up yet in this port, so fail the
	 * call rather than return with the lock held.
	 */
	ret = -ENOMEM;
#endif
	mutex_unlock(&dev_priv->mm_lock);

	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
#if 0
	mmdrop(mm->mm);
#endif
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}
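/*
 * Context handed to the worker that pins the user pages on the slow path:
 * it carries the GEM object and a reference on the submitting task. The
 * worker itself is currently compiled out in this port.
 */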
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active()	swiotlb_nr_tbl()
#else
#define swiotlb_active()	0
#endif

#if 0
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), M_DRM, GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}
#endif

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

#if 0
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
#endif

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
#if 0
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag will have been dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag should still be held for the pending work */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
				      GFP_TEMPORARY);
		if (pvec == NULL) {
			__i915_gem_userptr_set_active(obj, false);
			return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
#else
	return 0;
#endif /* 0 */
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
#if 0
		put_page(page);
#endif
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
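/*
 * Called when the GEM object is destroyed: detach the object from its
 * mmu_notifier interval tree and drop the reference on the owning
 * i915_mm_struct.
 */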
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
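/*
 * Illustrative userspace sketch (not part of this driver; assumes libdrm's
 * drmIoctl() and the DRM_IOCTL_I915_GEM_USERPTR definition from the uAPI
 * headers):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned buffer
 *		.user_size = size,			// multiple of PAGE_SIZE
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		handle = arg.handle;			// GEM handle backed by ptr
 */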
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

#if 0
	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;
#endif

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that a
		 * page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	lockinit(&dev_priv->mm_lock, "i915dmm", 0, LK_CANRECURSE);
	hash_init(dev_priv->mm_structs);
}