/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interface is provided through the &drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * is the lookup caches for import and export. These are required to guarantee
 * that any given object will always have only one unique userspace handle. This
 * is required to allow userspace to detect duplicated imports, since some GEM
 * drivers fail command submissions if a given buffer object is listed more
 * than once. These import and export caches in &drm_prime_file_private only
 * retain a weak reference, which is cleaned up when the corresponding object is
 * released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
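
/*
 * Illustrative sketch (not part of this file): a GEM driver that relies on the
 * PRIME helpers typically wires the generic entry points into its &drm_driver.
 * The "foo" names below are hypothetical placeholders.
 *
 *      static struct drm_driver foo_drm_driver = {
 *              .driver_features        = DRIVER_GEM,
 *              .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *              .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *              .gem_prime_import       = drm_gem_prime_import,
 *              ...
 *      };
 *
 * Per-object export behaviour then comes from &drm_gem_object_funcs.export,
 * which for most drivers can simply point at drm_gem_prime_export().
 */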

struct drm_prime_member {
        struct dma_buf *dma_buf;
        uint32_t handle;

        struct rb_node dmabuf_rb;
        struct rb_node handle_rb;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                    struct dma_buf *dma_buf, uint32_t handle)
{
        struct drm_prime_member *member;
        struct rb_node **p, *rb;

        member = kmalloc(sizeof(*member), GFP_KERNEL);
        if (!member)
                return -ENOMEM;

        get_dma_buf(dma_buf);
        member->dma_buf = dma_buf;
        member->handle = handle;

        rb = NULL;
        p = &prime_fpriv->dmabufs.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (dma_buf > pos->dma_buf)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->dmabuf_rb, rb, p);
        rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

        rb = NULL;
        p = &prime_fpriv->handles.rb_node;
        while (*p) {
                struct drm_prime_member *pos;

                rb = *p;
                pos = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (handle > pos->handle)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&member->handle_rb, rb, p);
        rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

        return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
                                                      uint32_t handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->handles.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, handle_rb);
                if (member->handle == handle)
                        return member->dma_buf;
                else if (member->handle < handle)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
                                       struct dma_buf *dma_buf,
                                       uint32_t *handle)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        *handle = member->handle;
                        return 0;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }

        return -ENOENT;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
                                        struct dma_buf *dma_buf)
{
        struct rb_node *rb;

        rb = prime_fpriv->dmabufs.rb_node;
        while (rb) {
                struct drm_prime_member *member;

                member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
                if (member->dma_buf == dma_buf) {
                        rb_erase(&member->handle_rb, &prime_fpriv->handles);
                        rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

                        dma_buf_put(dma_buf);
                        kfree(member);
                        return;
                } else if (member->dma_buf < dma_buf) {
                        rb = rb->rb_right;
                } else {
                        rb = rb->rb_left;
                }
        }
}

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
        rw_init(&prime_fpriv->lock, "primlk");
        prime_fpriv->dmabufs = RB_ROOT;
        prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
        /* by now drm_gem_release should've made sure the list is empty */
        WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
                                      struct dma_buf_export_info *exp_info)
{
        struct drm_gem_object *obj = exp_info->priv;
        struct dma_buf *dma_buf;

        dma_buf = dma_buf_export(exp_info);
        if (IS_ERR(dma_buf))
                return dma_buf;

        drm_dev_get(dev);
        drm_gem_object_get(obj);
#ifdef __linux__
        dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;
#endif

        return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        /* drop the reference the export fd holds */
        drm_gem_object_put_unlocked(obj);

        drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
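
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file): a
 * driver that needs its own &dma_buf_ops can still pair drm_gem_dmabuf_export()
 * with drm_gem_dmabuf_release(), so the references taken on export are dropped
 * again when the dma-buf goes away.
 *
 *      static const struct dma_buf_ops foo_dmabuf_ops = {
 *              ...
 *              .release = drm_gem_dmabuf_release,
 *      };
 *
 *      static struct dma_buf *foo_gem_export(struct drm_gem_object *obj, int flags)
 *      {
 *              struct dma_buf_export_info exp_info = {
 *                      .ops    = &foo_dmabuf_ops,
 *                      .size   = obj->size,
 *                      .flags  = flags,
 *                      .priv   = obj,
 *                      .resv   = obj->resv,
 *              };
 *
 *              return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *      }
 */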

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: device to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
                               struct drm_file *file_priv, int prime_fd,
                               uint32_t *handle)
{
        struct dma_buf *dma_buf;
        struct drm_gem_object *obj;
        int ret;

        dma_buf = dma_buf_get(prime_fd);
        if (IS_ERR(dma_buf))
                return PTR_ERR(dma_buf);

        mutex_lock(&file_priv->prime.lock);

        ret = drm_prime_lookup_buf_handle(&file_priv->prime,
                        dma_buf, handle);
        if (ret == 0)
                goto out_put;

        /* never seen this one, need to import */
        mutex_lock(&dev->object_name_lock);
        if (dev->driver->gem_prime_import)
                obj = dev->driver->gem_prime_import(dev, dma_buf);
        else
                obj = drm_gem_prime_import(dev, dma_buf);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto out_unlock;
        }

        if (obj->dma_buf) {
                WARN_ON(obj->dma_buf != dma_buf);
        } else {
                obj->dma_buf = dma_buf;
                get_dma_buf(dma_buf);
        }

        /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, handle);
        drm_gem_object_put_unlocked(obj);
        if (ret)
                goto out_put;

        ret = drm_prime_add_buf_handle(&file_priv->prime,
                        dma_buf, *handle);
        mutex_unlock(&file_priv->prime.lock);
        if (ret)
                goto fail;

        dma_buf_put(dma_buf);

        return 0;

fail:
        /* hmm, if driver attached, we are relying on the free-object path
         * to detach.. which seems ok..
         */
        drm_gem_handle_delete(file_priv, *handle);
        dma_buf_put(dma_buf);
        return ret;

out_unlock:
        mutex_unlock(&dev->object_name_lock);
out_put:
        mutex_unlock(&file_priv->prime.lock);
        dma_buf_put(dma_buf);
        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!dev->driver->prime_fd_to_handle)
                return -ENOSYS;

        return dev->driver->prime_fd_to_handle(dev, file_priv,
                        args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
                                                  struct drm_gem_object *obj,
                                                  uint32_t flags)
{
        struct dma_buf *dmabuf;

        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                dmabuf = ERR_PTR(-ENOENT);
                return dmabuf;
        }

        if (obj->funcs && obj->funcs->export)
                dmabuf = obj->funcs->export(obj, flags);
        else if (dev->driver->gem_prime_export)
                dmabuf = dev->driver->gem_prime_export(obj, flags);
        else
                dmabuf = drm_gem_prime_export(obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                return dmabuf;
        }

        /*
         * Note that callers do not need to clean up the export cache
         * since the check for obj->handle_count guarantees that someone
         * will clean it up.
         */
        obj->dma_buf = dmabuf;
        get_dma_buf(obj->dma_buf);

        return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used by GEM drivers to
 * ensure correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
                               struct drm_file *file_priv, uint32_t handle,
                               uint32_t flags,
                               int *prime_fd)
{
        struct drm_gem_object *obj;
        int ret = 0;
        struct dma_buf *dmabuf;

        mutex_lock(&file_priv->prime.lock);
        obj = drm_gem_object_lookup(file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto out_unlock;
        }

        dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
        if (dmabuf) {
                get_dma_buf(dmabuf);
                goto out_have_handle;
        }

        mutex_lock(&dev->object_name_lock);
#ifdef notyet
        /* re-export the original imported object */
        if (obj->import_attach) {
                dmabuf = obj->import_attach->dmabuf;
                get_dma_buf(dmabuf);
                goto out_have_obj;
        }
#endif

        if (obj->dma_buf) {
                get_dma_buf(obj->dma_buf);
                dmabuf = obj->dma_buf;
                goto out_have_obj;
        }

        dmabuf = export_and_register_object(dev, obj, flags);
        if (IS_ERR(dmabuf)) {
                /* normally the created dma-buf takes ownership of the ref,
                 * but if that fails then drop the ref
                 */
                ret = PTR_ERR(dmabuf);
                mutex_unlock(&dev->object_name_lock);
                goto out;
        }

out_have_obj:
        /*
         * If we've exported this buffer then cheat and add it to the import list
         * so we get the correct handle back. We must do this under the
         * protection of dev->object_name_lock to ensure that a racing gem close
         * ioctl doesn't miss removing this buffer handle from the cache.
         */
        ret = drm_prime_add_buf_handle(&file_priv->prime,
                                       dmabuf, handle);
        mutex_unlock(&dev->object_name_lock);
        if (ret)
                goto fail_put_dmabuf;

out_have_handle:
        ret = dma_buf_fd(dmabuf, flags);
        /*
         * We must _not_ remove the buffer from the handle cache since the newly
         * created dma buf is already linked in the global obj->dma_buf pointer,
         * and that is invariant as long as a userspace gem handle exists.
         * Closing the handle will clean out the cache anyway, so we don't leak.
         */
        if (ret < 0) {
                goto fail_put_dmabuf;
        } else {
                *prime_fd = ret;
                ret = 0;
        }

        goto out;

fail_put_dmabuf:
        dma_buf_put(dmabuf);
out:
        drm_gem_object_put_unlocked(obj);
out_unlock:
        mutex_unlock(&file_priv->prime.lock);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_prime_handle *args = data;

        if (!dev->driver->prime_handle_to_fd)
                return -ENOSYS;

        /* check flags are valid */
        if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;

        return dev->driver->prime_handle_to_fd(dev, file_priv,
                        args->handle, args->flags, &args->fd);
}

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on
 * &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to pin
 * it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage, which is ok for scanout but not the best option
 * for sharing lots of buffers for rendering.
 */
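
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file): with
 * the helpers described above, a driver whose backing storage is coherent and
 * safe to pin can typically fill its &drm_gem_object_funcs like this and leave
 * the dma-buf plumbing to the PRIME core:
 *
 *      static const struct drm_gem_object_funcs foo_gem_funcs = {
 *              .free           = foo_gem_free_object,
 *              .export         = drm_gem_prime_export,
 *              .pin            = foo_gem_pin,
 *              .unpin          = foo_gem_unpin,
 *              .get_sg_table   = foo_gem_get_sg_table,
 *              .vmap           = foo_gem_vmap,
 *              .vunmap         = foo_gem_vunmap,
 *      };
 */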

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
                       struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;

        return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
                        struct dma_buf_attachment *attach)
{
        struct drm_gem_object *obj = dma_buf->priv;

        drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

#ifdef notyet

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                     enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct sg_table *sgt;

        if (WARN_ON(dir == DMA_NONE))
                return ERR_PTR(-EINVAL);

        if (obj->funcs)
                sgt = obj->funcs->get_sg_table(obj);
        else
                sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

        if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
                              DMA_ATTR_SKIP_CPU_SYNC)) {
                sg_free_table(sgt);
                kfree(sgt);
                sgt = ERR_PTR(-ENOMEM);
        }

        return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                           struct sg_table *sgt,
                           enum dma_data_direction dir)
{
        if (!sgt)
                return;

        dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
                           DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

#endif /* notyet */

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 *
 * Returns the kernel virtual address or NULL on failure.
 */
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj = dma_buf->priv;
        void *vaddr;

        vaddr = drm_gem_vmap(obj);
        if (IS_ERR(vaddr))
                vaddr = NULL;

        return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @vaddr: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_gem_object *obj = dma_buf->priv;

        drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);

#ifdef notyet

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
        struct drm_file *priv;
        struct file *fil;
        int ret;

        /* Add the fake offset */
        vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

        if (obj->funcs && obj->funcs->mmap) {
                ret = obj->funcs->mmap(obj, vma);
                if (ret)
                        return ret;
                vma->vm_private_data = obj;
                drm_gem_object_get(obj);
                return 0;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        fil = kzalloc(sizeof(*fil), GFP_KERNEL);
        if (!priv || !fil) {
                ret = -ENOMEM;
                goto out;
        }

        /* Used by drm_gem_mmap() to lookup the GEM object */
        priv->minor = obj->dev->primary;
        fil->private_data = priv;

        ret = drm_vma_node_allow(&obj->vma_node, priv);
        if (ret)
                goto out;

        ret = obj->dev->driver->fops->mmap(fil, vma);

        drm_vma_node_revoke(&obj->vma_node, priv);
out:
        kfree(priv);
        kfree(fil);

        return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->dev;

        if (!dev->driver->gem_prime_mmap)
                return -ENOSYS;

        return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

#endif /* notyet */

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
#ifdef notyet
        .cache_sgt_mapping = true,
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
        .unmap_dma_buf = drm_gem_unmap_dma_buf,
#endif
        .release = drm_gem_dmabuf_release,
#ifdef notyet
        .mmap = drm_gem_dmabuf_mmap,
        .vmap = drm_gem_dmabuf_vmap,
        .vunmap = drm_gem_dmabuf_vunmap,
#endif
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages; the driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct vm_page **pages, unsigned int nr_pages)
{
        STUB();
        return NULL;
#ifdef notyet
        struct sg_table *sg = NULL;
        int ret;

        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg) {
                ret = -ENOMEM;
                goto out;
        }

        ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
                                nr_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                goto out;

        return sg;
out:
        kfree(sg);
        return ERR_PTR(ret);
#endif
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
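
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file): a
 * driver that already tracks its backing pages in an array can implement
 * &drm_gem_object_funcs.get_sg_table by handing that array to
 * drm_prime_pages_to_sg(). The foo_gem_object layout is assumed.
 *
 *      static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
 *      {
 *              struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *              return drm_prime_pages_to_sg(foo_obj->pages,
 *                                           obj->size >> PAGE_SHIFT);
 *      }
 */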

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
                                     int flags)
{
        struct drm_device *dev = obj->dev;
        struct dma_buf_export_info exp_info = {
#ifdef __linux__
                .exp_name = KBUILD_MODNAME, /* white lie for debug */
                .owner = dev->driver->fops->owner,
#endif
                .ops = &drm_gem_prime_dmabuf_ops,
                .size = obj->size,
                .flags = flags,
                .priv = obj,
                .resv = obj->resv,
        };

        return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
                                                struct dma_buf *dma_buf,
                                                struct device *attach_dev)
{
        struct dma_buf_attachment *attach;
#ifdef notyet
        struct sg_table *sgt;
#endif
        struct drm_gem_object *obj;
        int ret;

        if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
                obj = dma_buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing dmabuf exported from our own gem increases
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

#ifdef notyet
        if (!dev->driver->gem_prime_import_sg_table)
                return ERR_PTR(-EINVAL);
#endif

        attach = dma_buf_attach(dma_buf, attach_dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

#ifdef notyet
        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                ret = PTR_ERR(sgt);
                goto fail_detach;
        }

        obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto fail_unmap;
        }

        obj->import_attach = attach;
        obj->resv = dma_buf->resv;

        return obj;

fail_unmap:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
#else
        ret = 0;
        panic(__func__);
#endif
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
                                            struct dma_buf *dma_buf)
{
        return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);
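
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this file): as
 * noted above, drivers using drm_gem_prime_import() or
 * drm_gem_prime_import_dev() need to undo the import from their
 * &drm_gem_object_funcs.free hook, typically along these lines:
 *
 *      static void foo_gem_free_object(struct drm_gem_object *obj)
 *      {
 *              struct foo_gem_object *foo_obj = to_foo_gem_object(obj);
 *
 *              if (obj->import_attach)
 *                      drm_prime_gem_destroy(obj, foo_obj->sgt);
 *
 *              drm_gem_object_release(obj);
 *              kfree(foo_obj);
 *      }
 */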

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: optional array of page pointers to store the page array in
 * @addrs: optional array to store the dma bus address of each page
 * @max_entries: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 *
 * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
 * implementation.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct vm_page **pages,
                                     dma_addr_t *addrs, int max_entries)
{
        unsigned count;
        struct scatterlist *sg;
        struct vm_page *page;
        u32 page_len, page_index;
        dma_addr_t addr;
        u32 dma_len, dma_index;

        /*
         * Scatterlist elements contain both pages and DMA addresses, but
         * one should not assume a 1:1 relation between them. The sg->length is
         * the size of the physical memory chunk described by the sg->page,
         * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
         * described by the sg_dma_address(sg).
         */
        page_index = 0;
        dma_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                page_len = sg->length;
                page = sg_page(sg);
                dma_len = sg_dma_len(sg);
                addr = sg_dma_address(sg);

                while (pages && page_len > 0) {
                        if (WARN_ON(page_index >= max_entries))
                                return -1;
                        pages[page_index] = page;
                        page++;
                        page_len -= PAGE_SIZE;
                        page_index++;
                }
                while (addrs && dma_len > 0) {
                        if (WARN_ON(dma_index >= max_entries))
                                return -1;
                        addrs[dma_index] = addr;
                        addr += PAGE_SIZE;
                        dma_len -= PAGE_SIZE;
                        dma_index++;
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
        STUB();
#ifdef notyet
        struct dma_buf_attachment *attach;
        struct dma_buf *dma_buf;

        attach = obj->import_attach;
        if (sg)
                dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
        dma_buf = attach->dmabuf;
        dma_buf_detach(attach->dmabuf, attach);
        /* remove the reference */
        dma_buf_put(dma_buf);
#endif
}
EXPORT_SYMBOL(drm_prime_gem_destroy);