/*	$NetBSD: drm_bufs.c,v 1.12 2020/02/14 14:34:57 maya Exp $	*/

/*
 * Legacy: Generic DRM Buffer Management
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_bufs.c,v 1.12 2020/02/14 14:34:57 maya Exp $");

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
#include "drm_legacy.h"

static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAME_BUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
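
/*
 * Example (for illustration): two _DRM_REGISTERS maps at 0x100000000 and
 * 0x100040000 are still told apart here, since their low 32 bits differ
 * (0x00000000 vs 0x00040000), while a second addmap of the same BAR matches
 * the existing entry and is treated as a duplicate.  The scheme only breaks
 * down if a driver exposes two resources of the same type whose offsets
 * differ solely in the upper 32 bits.
 */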

static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

	use_hashed_handle = (user_token &~ 0xffffffffUL) || hashed_handle;
	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}

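/*
 * Worked example (assuming PAGE_SIZE 4096 and SHMLBA == 4 * PAGE_SIZE): a
 * _DRM_SHM map whose kernel virtual address does not fit in 32 bits takes
 * the hashed path above.  bits = ilog2(4) + 1 = 3, so the low 3 page-offset
 * bits of the kernel address are copied into 'add', and with shift = 3
 * drm_ht_just_insert_please() may only vary the key above those bits.  The
 * resulting user token (key << PAGE_SHIFT) therefore shares the kernel
 * address's SHMLBA colour bits, which is what avoids the cache aliasing
 * described in the comment.
 */
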
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
		kfree(map);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#ifndef __NetBSD__		/* XXX No idea what this is for...  */
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
#ifdef __NetBSD__
			drm_legacy_ioremap(map, dev);
#else
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
#endif
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			spin_lock(&dev->primary->master->lock.spinlock);
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				spin_unlock(&dev->primary->master->lock.spinlock);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
			spin_unlock(&dev->primary->master->lock.spinlock);
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!dev->agp) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
#ifdef __NetBSD__
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aki_info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
#else
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
#endif
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. the i810 driver).  In that case this
		 * loop is skipped, so before returning -EPERM we double
		 * check that dev->agp->memory is actually non-empty as
		 * well as the range really being invalid.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
#ifdef __NetBSD__
		map->lm_data.dmah = dmah;
#else
		kfree(dmah);
#endif
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
#ifdef __NetBSD__
			drm_legacy_ioremapfree(map, dev);
#else
			iounmap(map->handle);
#endif
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
#ifdef __NetBSD__		/* XXX What about other map types...?  */
			drm_legacy_ioremapfree(map, dev);
#else
			iounmap(map->handle);
#endif
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}

int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_legacy_addmap);

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 */
int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: this casting isn't very nice, but the
	 * API is already set, so it's too late to change it */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
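
/*
 * Typical userspace usage, sketched for illustration (field names as in
 * struct drm_map; error handling omitted, and SAREA_MAX is just a stand-in
 * for whatever size the server actually needs):
 *
 *	struct drm_map map = {
 *		.offset = 0,			// kernel picks the SHM address
 *		.size   = SAREA_MAX,
 *		.type   = _DRM_SHM,
 *		.flags  = _DRM_CONTAINS_LOCK,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *	// map.handle now holds the user token; it is what gets passed as
 *	// the mmap() offset to actually map the region (as drmMap() does).
 */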

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
 *
 * \sa drm_legacy_addmap
 */
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
#ifndef __NetBSD__
	drm_dma_handle_t dmah;
#endif
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
#ifdef __NetBSD__
		drm_legacy_ioremapfree(map, dev);
#else
		iounmap(map->handle);
#endif
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		if (master && (map->flags & _DRM_CONTAINS_LOCK)) {
			spin_lock(&master->lock.spinlock);
			/*
			 * If we successfully removed this mapping,
			 * then the mapping must have been there in the
			 * first place, and we must have had a
			 * heavyweight lock, so we assert here instead
			 * of just checking and failing.
			 *
			 * XXX What about the _DRM_CONTAINS_LOCK flag?
			 * Where is that supposed to be set?  Is it
			 * equivalent to having a master set?
			 *
			 * XXX There is copypasta of this in
			 * drm_fops.c.
			 */
			BUG_ON(master->lock.hw_lock == NULL);
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
#ifdef __NetBSD__
			DRM_SPIN_WAKEUP_ALL(&master->lock.lock_queue,
					    &master->lock.spinlock);
#else
			wake_up_interruptible_all(&master->lock.lock_queue);
#endif
			spin_unlock(&master->lock.spinlock);
		}
		vfree(map->handle);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
#ifdef __NetBSD__
		drm_pci_free(dev, map->lm_data.dmah);
#else
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_legacy_pci_free(dev, &dmah);
#endif
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);

int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_legacy_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);

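/*
 * drm_legacy_rmmap() is the variant for callers that do not already hold
 * dev->struct_mutex; paths that are already under the mutex (such as the
 * rmmap ioctl below) call drm_legacy_rmmap_locked() directly instead.
 */
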
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_legacy_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}

#if IS_ENABLED(CONFIG_AGP)
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_legacy_addbufs_agp(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_legacy_addbufs_agp);
#endif /* CONFIG_AGP */
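
/*
 * Worked example of the size/order arithmetic shared by the addbufs paths
 * (assuming PAGE_SHIFT 12): a request->size of 65536 gives order 16 and
 * size 1 << 16; page_order is 16 - 12 = 4, so each segment spans
 * PAGE_SIZE << 4 = 64 KiB.  With _DRM_PAGE_ALIGN the per-buffer stride
 * (alignment) is PAGE_ALIGN(65536) = 65536, otherwise it is the raw size.
 * A request->size of 16384 likewise gives order 14 and page_order 2.
 */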

int drm_legacy_addbufs_pci(struct drm_device *dev,
			   struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_legacy_addbufs_pci);
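
/*
 * For illustration: in the PCI path above, each drm_pci_alloc() segment of
 * PAGE_SIZE << page_order bytes is carved into buffers at 'alignment'-byte
 * strides, so with request->size = 8192 on a 4 KiB-page system (order 13,
 * page_order 1) every segment yields exactly one buffer and seg_count ends
 * up equal to buf_count.  The copy of the page list is built in
 * temp_pagelist and only swapped into dma->pagelist once every allocation
 * has succeeded, which is why the error paths free temp_pagelist and leave
 * the original list untouched.
 */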

static int drm_legacy_addbufs_sg(struct drm_device *dev,
				 struct drm_buf_desc *request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->buf_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_legacy_addbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if IS_ENABLED(CONFIG_AGP)
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_legacy_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_legacy_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_legacy_addbufs_pci(dev, request);

	return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::buf_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_legacy_infobufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
					&request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &from->low_mark,
						 sizeof(from->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &from->high_mark,
						 sizeof(from->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].low_mark,
					  dma->bufs[i].high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_legacy_markbufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->low_mark = request->low_mark;
	entry->high_mark = request->high_mark;

	return 0;
}

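/*
 * Example: with request->size = 16384, order_base_2() gives order 14, so
 * the marks apply to dma->bufs[14]; both low_mark and high_mark must lie
 * within [0, entry->buf_count] for that order or the ioctl fails with
 * -EINVAL.
 */
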
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_legacy_freebufs(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_legacy_free_buffer(dev, buf);
	}

	return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_legacy_mapbufs(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->buf_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->buf_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}

struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_legacy_getsarea);