1 /*
2  * Legacy: Generic DRM Buffer Management
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Author: Rickard E. (Rik) Faith <faith@valinux.com>
9  * Author: Gareth Hughes <gareth@valinux.com>
10  *
11  * Permission is hereby granted, free of charge, to any person obtaining a
12  * copy of this software and associated documentation files (the "Software"),
13  * to deal in the Software without restriction, including without limitation
14  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15  * and/or sell copies of the Software, and to permit persons to whom the
16  * Software is furnished to do so, subject to the following conditions:
17  *
18  * The above copyright notice and this permission notice (including the next
19  * paragraph) shall be included in all copies or substantial portions of the
20  * Software.
21  *
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
25  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28  * OTHER DEALINGS IN THE SOFTWARE.
29  */
30 
31 #include <linux/vmalloc.h>
32 #include <linux/log2.h>
33 #include <linux/export.h>
34 #include <asm/shmparam.h>
35 #include <drm/drmP.h>
36 #include "drm_legacy.h"
37 
38 #if 0
39 static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
40 						  struct drm_local_map *map)
41 {
42 	struct drm_map_list *entry;
43 	list_for_each_entry(entry, &dev->maplist, head) {
44 		/*
45 		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
46 		 * while PCI resources may live above that, we only compare the
47 		 * lower 32 bits of the map offset for maps of type
48 		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
49 		 * It is assumed that if a driver has more than one resource
50 		 * of each type, the lower 32 bits are different.
51 		 */
52 		if (!entry->map ||
53 		    map->type != entry->map->type ||
54 		    entry->master != dev->primary->master)
55 			continue;
56 		switch (map->type) {
57 		case _DRM_SHM:
58 			if (map->flags != _DRM_CONTAINS_LOCK)
59 				break;
60 			return entry;
61 		case _DRM_REGISTERS:
62 		case _DRM_FRAME_BUFFER:
63 			if ((entry->map->offset & 0xffffffff) ==
64 			    (map->offset & 0xffffffff))
65 				return entry;
66 		default: /* Make gcc happy */
67 			;
68 		}
69 		if (entry->map->offset == map->offset)
70 			return entry;
71 	}
72 
73 	return NULL;
74 }
75 
76 static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
77 			  unsigned long user_token, int hashed_handle, int shm)
78 {
79 }
80 #endif
81 
82 /**
83  * Core function to create a range of memory available for mapping by a
84  * non-root process.
85  *
86  * Adjusts the memory offset to its absolute value according to the mapping
87  * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
88  * applicable and if supported by the kernel.
89  */
90 static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
91 			   unsigned int size, enum drm_map_type type,
92 			   enum drm_map_flags flags,
93 			   struct drm_map_list ** maplist)
94 {
95 	struct drm_local_map *map;
96 	struct drm_map_list *list = NULL;
97 	drm_dma_handle_t *dmah;
98 
99 	/* Allocate a new map structure, fill it in, and do any type-specific
100 	 * initialization necessary.
101 	 */
102 	map = kmalloc(sizeof(*map), M_DRM, M_ZERO | M_WAITOK | M_NULLOK);
103 	if (!map)
104 		return -ENOMEM;
105 
106 	map->offset = offset;
107 	map->size = size;
108 	map->flags = flags;
109 	map->type = type;
110 
111 	/* Only allow shared memory to be removable since we only keep enough
112 	 * bookkeeping information about shared memory to allow for removal
113 	 * when processes fork.
114 	 */
115 	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
116 		kfree(map);
117 		return -EINVAL;
118 	}
119 	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
120 		DRM_ERROR("offset/size not page aligned: 0x%jx/0x%04x\n",
121 		    (uintmax_t)offset, size);
122 		kfree(map);
123 		return -EINVAL;
124 	}
125 	if (offset + size < offset) {
126 		DRM_ERROR("offset and size wrap around: 0x%jx/0x%04x\n",
127 		    (uintmax_t)offset, size);
128 		kfree(map);
129 		return -EINVAL;
130 	}
131 
132 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
133 		  (unsigned long long)map->offset, map->size, map->type);
134 
135 	/* Check if this is just another version of a kernel-allocated map, and
136 	 * just hand that back if so.
137 	 */
138 	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
139 	    type == _DRM_SHM) {
140 		list_for_each_entry(list, &dev->maplist, head) {
141 			if (list->map->type == type && (list->map->offset == offset ||
142 			    (list->map->type == _DRM_SHM &&
143 			    list->map->flags == _DRM_CONTAINS_LOCK))) {
144 				list->map->size = size;
145 				DRM_DEBUG("Found kernel map %d\n", type);
146 				goto done;
147 			}
148 		}
149 	}
150 	map->mtrr = -1;
151 	map->handle = NULL;
152 
153 	switch (map->type) {
154 	case _DRM_REGISTERS:
155 	case _DRM_FRAME_BUFFER:
156 
157 		if (map->type == _DRM_FRAME_BUFFER ||
158 		    (map->flags & _DRM_WRITE_COMBINING)) {
159 			map->mtrr =
160 				arch_phys_wc_add(map->offset, map->size);
161 		}
162 		if (map->type == _DRM_REGISTERS) {
163 			if (map->flags & _DRM_WRITE_COMBINING)
164 				map->handle = ioremap_wc(map->offset,
165 							 map->size);
166 			else
167 				map->handle = ioremap(map->offset, map->size);
168 			if (!map->handle) {
169 				kfree(map);
170 				return -ENOMEM;
171 			}
172 		}
173 
174 		break;
175 	case _DRM_SHM:
176 		map->handle = vmalloc_user(map->size);
177 		DRM_DEBUG("%lu %d %p\n",
178 			  map->size, order_base_2(map->size), map->handle);
179 		if (!map->handle) {
180 			kfree(map);
181 			return -ENOMEM;
182 		}
183 		map->offset = (unsigned long)map->handle;
184 		if (map->flags & _DRM_CONTAINS_LOCK) {
185 			/* Prevent a 2nd X Server from creating a 2nd lock */
186 			if (dev->lock.hw_lock != NULL) {
187 				vfree(map->handle);
188 				kfree(map);
189 				return -EBUSY;
190 			}
191 			dev->lock.hw_lock = map->handle; /* Pointer to lock */
192 		}
193 		break;
194 	case _DRM_AGP: {
195 
196 		if (!dev->agp) {
197 			kfree(map);
198 			return -EINVAL;
199 		}
200 		/*valid = 0;*/
201 		/* In some cases (i810 driver), user space may have already
202 		 * added the AGP base itself, because dev->agp->base previously
203 		 * only got set during AGP enable.  So, only add the base
204 		 * address if the map's offset isn't already within the
205 		 * aperture.
206 		 */
207 		if (map->offset < dev->agp->base ||
208 		    map->offset > dev->agp->base +
209 		    dev->agp->agp_info.ai_aperture_size - 1) {
210 			map->offset += dev->agp->base;
211 		}
212 		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
213 		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
214 			if ((map->offset >= entry->bound) &&
215 			    (map->offset + map->size <=
216 			    entry->bound + entry->pages * PAGE_SIZE)) {
217 				valid = 1;
218 				break;
219 			}
220 		}
221 		if (!valid) {
222 			kfree(map);
223 			return -EACCES;
224 		}*/
225 		break;
226 	}
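	/* A scatter/gather map is an offset into the already allocated SG
	 * area; dev->sg must exist. */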
227 	case _DRM_SCATTER_GATHER:
228 		if (!dev->sg) {
229 			kfree(map);
230 			return -EINVAL;
231 		}
232 		map->handle = (void *)(uintptr_t)(dev->sg->vaddr + offset);
233 		map->offset = dev->sg->vaddr + offset;
234 		break;
235 	case _DRM_CONSISTENT:
236 		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
237 		 * As we're limiting the address to 2^32-1 (or less),
238 		 * casting it down to 32 bits is no problem, but we
239 		 * need to point to a 64-bit variable first. */
240 		dmah = drm_pci_alloc(dev, map->size, map->size);
241 		if (!dmah) {
242 			kfree(map);
243 			return -ENOMEM;
244 		}
245 		map->handle = dmah->vaddr;
246 		map->offset = (unsigned long)dmah->busaddr;
247 		kfree(dmah);
248 		break;
249 	default:
250 		DRM_ERROR("Bad map type %d\n", map->type);
251 		kfree(map);
252 		return -EINVAL;
253 	}
254 
255 	list = kzalloc(sizeof(*list), GFP_KERNEL);
256 	if (!list) {
257 		if (map->type == _DRM_REGISTERS)
258 			iounmap(map->handle);
259 		kfree(map);
260 		return -EINVAL;
261 	}
262 	list->map = map;
263 
264 	mutex_lock(&dev->struct_mutex);
265 	list_add(&list->head, &dev->maplist);
266 	mutex_unlock(&dev->struct_mutex);
267 
268 done:
269 	/* Jumped to, with lock held, when a kernel map is found. */
270 
271 	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
272 	    map->size);
273 
274 	*maplist = list;
275 
276 	return 0;
277 }
278 
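/**
 * Kernel-internal wrapper around drm_addmap_core() that returns the
 * struct drm_local_map itself rather than its map list entry.
 */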
279 int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
280 		      unsigned int size, enum drm_map_type type,
281 		      enum drm_map_flags flags, struct drm_local_map **map_ptr)
282 {
283 	struct drm_map_list *list;
284 	int rc;
285 
286 	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
287 	if (!rc)
288 		*map_ptr = list->map;
289 	return rc;
290 }
291 EXPORT_SYMBOL(drm_legacy_addmap);
292 
293 /**
294  * Ioctl to specify a range of memory that is available for mapping by a
295  * non-root process.
296  *
297  * \param inode device inode.
298  * \param file_priv DRM file private.
299  * \param cmd command.
300  * \param arg pointer to a drm_map structure.
301  * \return zero on success or a negative value on error.
302  *
303  */
304 int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
305 			    struct drm_file *file_priv)
306 {
307 	struct drm_map *request = data;
308 	drm_local_map_t *map;
309 	int err;
310 
311 	if (!(dev->flags & (FREAD|FWRITE)))
312 		return -EACCES; /* Require read/write */
313 
314 	if (!capable(CAP_SYS_ADMIN) && request->type != _DRM_AGP)
315 		return -EACCES;
316 
317 	DRM_LOCK(dev);
318 	err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
319 	    request->flags, &map);
320 	DRM_UNLOCK(dev);
321 	if (err != 0)
322 		return err;
323 
324 	request->offset = map->offset;
325 	request->size = map->size;
326 	request->type = map->type;
327 	request->flags = map->flags;
328 	request->mtrr   = map->mtrr;
329 	request->handle = (void *)map->handle;
330 
331 	return 0;
332 }
333 
334 /*
335  * Get mapping information.
336  *
337  * \param inode device inode.
338  * \param file_priv DRM file private.
339  * \param cmd command.
340  * \param arg user argument, pointing to a drm_map structure.
341  *
342  * \return zero on success or a negative number on failure.
343  *
344  * Searches for the mapping with the specified offset and copies its information
345  * into userspace.
346  */
347 int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
348 			    struct drm_file *file_priv)
349 {
350 	struct drm_map *map = data;
351 	struct drm_map_list *r_list = NULL;
352 	struct list_head *list;
353 	int idx;
354 	int i;
355 
356 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
357 	    drm_core_check_feature(dev, DRIVER_MODESET))
358 		return -EINVAL;
359 
360 	idx = map->offset;
361 	if (idx < 0)
362 		return -EINVAL;
363 
364 	i = 0;
365 	mutex_lock(&dev->struct_mutex);
366 	list_for_each(list, &dev->maplist) {
367 		if (i == idx) {
368 			r_list = list_entry(list, struct drm_map_list, head);
369 			break;
370 		}
371 		i++;
372 	}
373 	if (!r_list || !r_list->map) {
374 		mutex_unlock(&dev->struct_mutex);
375 		return -EINVAL;
376 	}
377 
378 	map->offset = r_list->map->offset;
379 	map->size = r_list->map->size;
380 	map->type = r_list->map->type;
381 	map->flags = r_list->map->flags;
382 	map->handle = (void *)(unsigned long) r_list->user_token;
383 	map->mtrr = r_list->map->mtrr;
384 
385 	mutex_unlock(&dev->struct_mutex);
386 
387 	return 0;
388 }
389 
390 /**
391  * Remove a map from the maplist and deallocate its resources if the mapping
392  * isn't in use.
393  *
394  * Searches for the map on drm_device::maplist, removes it from the list,
395  * checks whether it is still in use, and frees any associated resources
396  * (such as MTRRs) if it is not.
397  *
398  * \sa drm_legacy_addmap
399  */
400 int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
401 {
402 	struct drm_map_list *r_list = NULL, *list_t;
403 	drm_dma_handle_t dmah;
404 	int found = 0;
405 
406 	/* Find the list entry for the map and remove it */
407 	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
408 		if (r_list->map == map) {
409 			list_del(&r_list->head);
410 			kfree(r_list);
411 			found = 1;
412 			break;
413 		}
414 	}
415 
416 	if (!found)
417 		return -EINVAL;
418 
419 	switch (map->type) {
420 	case _DRM_REGISTERS:
421 		drm_legacy_ioremapfree(map, dev);
422 		/* FALLTHROUGH */
423 	case _DRM_FRAME_BUFFER:
424 		arch_phys_wc_del(map->mtrr);
425 		break;
426 	case _DRM_SHM:
427 		kfree(map->handle);
428 		break;
429 	case _DRM_AGP:
430 	case _DRM_SCATTER_GATHER:
431 		break;
432 	case _DRM_CONSISTENT:
433 		dmah.vaddr = map->handle;
434 		dmah.busaddr = map->offset;
435 		dmah.size = map->size;
436 		__drm_legacy_pci_free(dev, &dmah);
437 		break;
438 	}
439 	kfree(map);
440 
441 	return 0;
442 }
443 
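/*
 * Takes struct_mutex and removes the map via drm_legacy_rmmap_locked().
 * Refused for modesetting drivers without legacy context support.
 */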
444 int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
445 {
446 	int ret;
447 
448 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
449 	    drm_core_check_feature(dev, DRIVER_MODESET))
450 		return -EINVAL;
451 
452 	mutex_lock(&dev->struct_mutex);
453 	ret = drm_legacy_rmmap_locked(dev, map);
454 	mutex_unlock(&dev->struct_mutex);
455 
456 	return ret;
457 }
458 EXPORT_SYMBOL(drm_legacy_rmmap);
459 
460 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
461  * the last close of the device, and this is necessary for cleanup when things
462  * exit uncleanly.  Therefore, having userland manually remove mappings seems
463  * like a pointless exercise since they're going away anyway.
464  *
465  * One use case might be after addmap is allowed for normal users for SHM and
466  * gets used by drivers that the server doesn't need to care about.  This seems
467  * unlikely.
468  *
469  * \param inode device inode.
470  * \param file_priv DRM file private.
471  * \param cmd command.
472  * \param arg pointer to a struct drm_map structure.
473  * \return zero on success or a negative value on error.
474  */
475 int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
476 			   struct drm_file *file_priv)
477 {
478 	struct drm_map *request = data;
479 	struct drm_local_map *map = NULL;
480 	struct drm_map_list *r_list;
481 	int ret;
482 
483 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
484 	    drm_core_check_feature(dev, DRIVER_MODESET))
485 		return -EINVAL;
486 
487 	mutex_lock(&dev->struct_mutex);
488 	list_for_each_entry(r_list, &dev->maplist, head) {
489 		if (r_list->map &&
490 		    r_list->user_token == (unsigned long)request->handle &&
491 		    r_list->map->flags & _DRM_REMOVABLE) {
492 			map = r_list->map;
493 			break;
494 		}
495 	}
496 
497 	/* List has wrapped around to the head pointer, or it's empty and we
498 	 * didn't find anything.
499 	 */
500 	if (list_empty(&dev->maplist) || !map) {
501 		mutex_unlock(&dev->struct_mutex);
502 		return -EINVAL;
503 	}
504 
505 	/* Register and framebuffer maps are permanent */
506 	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
507 		mutex_unlock(&dev->struct_mutex);
508 		return 0;
509 	}
510 
511 	ret = drm_legacy_rmmap_locked(dev, map);
512 
513 	mutex_unlock(&dev->struct_mutex);
514 
515 	return ret;
516 }
517 
518 /**
519  * Cleanup after an error on one of the addbufs() functions.
520  *
521  * \param dev DRM device.
522  * \param entry buffer entry where the error occurred.
523  *
524  * Frees any pages and buffers associated with the given entry.
525  */
526 static void drm_cleanup_buf_error(struct drm_device * dev,
527 				  struct drm_buf_entry * entry)
528 {
529 	int i;
530 
531 	if (entry->seg_count) {
532 		for (i = 0; i < entry->seg_count; i++) {
533 			drm_pci_free(dev, entry->seglist[i]);
534 		}
535 		kfree(entry->seglist);
536 
537 		entry->seg_count = 0;
538 	}
539 
540 	if (entry->buf_count) {
541 		for (i = 0; i < entry->buf_count; i++) {
542 			kfree(entry->buflist[i].dev_private);
543 		}
544 		kfree(entry->buflist);
545 
546 		entry->buf_count = 0;
547 	}
548 }
549 
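/*
 * Allocate DMA buffers inside the AGP aperture.  The buffers are carved out
 * of the region starting at dev->agp->base + request->agp_start; only the
 * bookkeeping structures are allocated here, no backing memory.
 */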
550 static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
551 {
552 	struct drm_device_dma *dma = dev->dma;
553 	struct drm_buf_entry *entry;
554 	/* struct drm_agp_mem *agp_entry; */
555 	/* int valid */
556 	struct drm_buf *buf;
557 	unsigned long offset;
558 	unsigned long agp_offset;
559 	int count;
560 	int order;
561 	int size;
562 	int alignment;
563 	int page_order;
564 	int total;
565 	int byte_count;
566 	int i;
567 	struct drm_buf **temp_buflist;
568 
569 	count = request->count;
570 	order = order_base_2(request->size);
571 	size = 1 << order;
572 
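	/* request->size has been rounded up to a power of two; _DRM_PAGE_ALIGN
	 * additionally pads each buffer out to a page boundary. */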
573 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
574 	    ? round_page(size) : size;
575 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
576 	total = PAGE_SIZE << page_order;
577 
578 	byte_count = 0;
579 	agp_offset = dev->agp->base + request->agp_start;
580 
581 	DRM_DEBUG("count:      %d\n",  count);
582 	DRM_DEBUG("order:      %d\n",  order);
583 	DRM_DEBUG("size:       %d\n",  size);
584 	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
585 	DRM_DEBUG("alignment:  %d\n",  alignment);
586 	DRM_DEBUG("page_order: %d\n",  page_order);
587 	DRM_DEBUG("total:      %d\n",  total);
588 
589 	/* Make sure buffers are located in AGP memory that we own */
590 	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
591 	 * memory.  Safe to ignore for now because these ioctls are still
592 	 * root-only.
593 	 */
594 	/*valid = 0;
595 	for (agp_entry = dev->agp->memory; agp_entry;
596 	    agp_entry = agp_entry->next) {
597 		if ((agp_offset >= agp_entry->bound) &&
598 		    (agp_offset + total * count <=
599 		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
600 			valid = 1;
601 			break;
602 		}
603 	}
604 	if (!valid) {
605 		DRM_DEBUG("zone invalid\n");
606 		return -EINVAL;
607 	}*/
608 
609 	entry = &dma->bufs[order];
610 
611 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
612 				 M_WAITOK | M_NULLOK | M_ZERO);
613 	if (!entry->buflist) {
614 		return -ENOMEM;
615 	}
616 
617 	entry->buf_size = size;
618 	entry->page_order = page_order;
619 
620 	offset = 0;
621 
622 	while (entry->buf_count < count) {
623 		buf          = &entry->buflist[entry->buf_count];
624 		buf->idx     = dma->buf_count + entry->buf_count;
625 		buf->total   = alignment;
626 		buf->order   = order;
627 		buf->used    = 0;
628 
629 		buf->offset  = (dma->byte_count + offset);
630 		buf->bus_address = agp_offset + offset;
631 		buf->address = (void *)(agp_offset + offset);
632 		buf->next    = NULL;
633 		buf->pending = 0;
634 		buf->file_priv = NULL;
635 
636 		buf->dev_priv_size = dev->driver->dev_priv_size;
637 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
638 					   M_WAITOK | M_NULLOK | M_ZERO);
639 		if (buf->dev_private == NULL) {
640 			/* Set count correctly so we free the proper amount. */
641 			entry->buf_count = count;
642 			drm_cleanup_buf_error(dev, entry);
643 			return -ENOMEM;
644 		}
645 
646 		offset += alignment;
647 		entry->buf_count++;
648 		byte_count += PAGE_SIZE << page_order;
649 	}
650 
651 	DRM_DEBUG("byte_count: %d\n", byte_count);
652 
653 	temp_buflist = krealloc(dma->buflist,
654 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
655 	    M_DRM, M_WAITOK | M_NULLOK);
656 	if (temp_buflist == NULL) {
657 		/* Free the entry because it isn't valid */
658 		drm_cleanup_buf_error(dev, entry);
659 		return -ENOMEM;
660 	}
661 	dma->buflist = temp_buflist;
662 
663 	for (i = 0; i < entry->buf_count; i++) {
664 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
665 	}
666 
667 	dma->buf_count += entry->buf_count;
668 	dma->byte_count += byte_count;
669 
670 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
671 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
672 
673 	request->count = entry->buf_count;
674 	request->size = size;
675 
676 	dma->flags = _DRM_DMA_USE_AGP;
677 
678 	return 0;
679 }
680 
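/*
 * Allocate DMA buffers from system memory via drm_pci_alloc().  Each segment
 * holds one or more buffers, and the kernel virtual address of every page is
 * recorded in dma->pagelist so the buffers can be mapped later.
 */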
681 static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
682 {
683 	struct drm_device_dma *dma = dev->dma;
684 	int count;
685 	int order;
686 	int size;
687 	int total;
688 	int page_order;
689 	struct drm_buf_entry *entry;
690 	drm_dma_handle_t *dmah;
691 	struct drm_buf *buf;
692 	int alignment;
693 	unsigned long offset;
694 	int i;
695 	int byte_count;
696 	int page_count;
697 	unsigned long *temp_pagelist;
698 	struct drm_buf **temp_buflist;
699 
700 	count = request->count;
701 	order = order_base_2(request->size);
702 	size = 1 << order;
703 
704 	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
705 	    request->count, request->size, size, order);
706 
707 	alignment = (request->flags & _DRM_PAGE_ALIGN)
708 	    ? round_page(size) : size;
709 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
710 	total = PAGE_SIZE << page_order;
711 
712 	entry = &dma->bufs[order];
713 
714 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
715 				 M_WAITOK | M_NULLOK | M_ZERO);
716 	entry->seglist = kmalloc(count * sizeof(*entry->seglist), M_DRM,
717 				 M_WAITOK | M_NULLOK | M_ZERO);
718 
719 	/* Keep the original pagelist until we know all the allocations
720 	 * have succeeded
721 	 */
722 	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
723 				sizeof(*dma->pagelist),
724 				M_DRM, M_WAITOK | M_NULLOK);
725 
726 	if (entry->buflist == NULL || entry->seglist == NULL ||
727 	    temp_pagelist == NULL) {
728 		kfree(temp_pagelist);
729 		kfree(entry->seglist);
730 		kfree(entry->buflist);
731 		return -ENOMEM;
732 	}
733 
734 	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
735 	    sizeof(*dma->pagelist));
736 
737 	DRM_DEBUG("pagelist: %d entries\n",
738 	    dma->page_count + (count << page_order));
739 
740 	entry->buf_size	= size;
741 	entry->page_order = page_order;
742 	byte_count = 0;
743 	page_count = 0;
744 
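	/* Allocate one segment per iteration and carve as many buffers out of
	 * it as fit, until the requested count is reached. */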
745 	while (entry->buf_count < count) {
746 		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
747 
748 		if (!dmah) {
749 			/* Set count correctly so we free the proper amount. */
750 			entry->buf_count = count;
751 			entry->seg_count = count;
752 			drm_cleanup_buf_error(dev, entry);
753 			kfree(temp_pagelist);
754 			return -ENOMEM;
755 		}
756 		entry->seglist[entry->seg_count++] = dmah;
757 		for (i = 0; i < (1 << page_order); i++) {
758 			DRM_DEBUG("page %d @ 0x%08lx\n",
759 				  dma->page_count + page_count,
760 				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
761 			temp_pagelist[dma->page_count + page_count++]
762 				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
763 		}
764 		for (offset = 0;
765 		    offset + size <= total && entry->buf_count < count;
766 		    offset += alignment, ++entry->buf_count) {
767 			buf	     = &entry->buflist[entry->buf_count];
768 			buf->idx     = dma->buf_count + entry->buf_count;
769 			buf->total   = alignment;
770 			buf->order   = order;
771 			buf->used    = 0;
772 			buf->offset  = (dma->byte_count + byte_count + offset);
773 			buf->address = ((char *)dmah->vaddr + offset);
774 			buf->bus_address = dmah->busaddr + offset;
775 			buf->next    = NULL;
776 			buf->pending = 0;
777 			buf->file_priv = NULL;
778 
779 			buf->dev_priv_size = dev->driver->dev_priv_size;
780 			buf->dev_private = kmalloc(buf->dev_priv_size,
781 						   M_DRM,
782 						   M_WAITOK | M_NULLOK |
783 						    M_ZERO);
784 			if (buf->dev_private == NULL) {
785 				/* Set count correctly so we free the proper amount. */
786 				entry->buf_count = count;
787 				entry->seg_count = count;
788 				drm_cleanup_buf_error(dev, entry);
789 				kfree(temp_pagelist);
790 				return -ENOMEM;
791 			}
792 
793 			DRM_DEBUG("buffer %d @ %p\n",
794 			    entry->buf_count, buf->address);
795 		}
796 		byte_count += PAGE_SIZE << page_order;
797 	}
798 
799 	temp_buflist = krealloc(dma->buflist,
800 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
801 	    M_DRM, M_WAITOK | M_NULLOK);
802 	if (temp_buflist == NULL) {
803 		/* Free the entry because it isn't valid */
804 		drm_cleanup_buf_error(dev, entry);
805 		kfree(temp_pagelist);
806 		return -ENOMEM;
807 	}
808 	dma->buflist = temp_buflist;
809 
810 	for (i = 0; i < entry->buf_count; i++) {
811 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
812 	}
813 
814 	/* No allocations failed, so now we can replace the original pagelist
815 	 * with the new one.
816 	 */
817 	kfree(dma->pagelist);
818 	dma->pagelist = temp_pagelist;
819 
820 	dma->buf_count += entry->buf_count;
821 	dma->seg_count += entry->seg_count;
822 	dma->page_count += entry->seg_count << page_order;
823 	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
824 
825 	request->count = entry->buf_count;
826 	request->size = size;
827 
828 	return 0;
829 
830 }
831 
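/*
 * Allocate DMA buffers backed by the scatter/gather area (dev->sg); buffer
 * addresses are offsets from dev->sg->vaddr starting at request->agp_start.
 */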
832 static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
833 {
834 	struct drm_device_dma *dma = dev->dma;
835 	struct drm_buf_entry *entry;
836 	struct drm_buf *buf;
837 	unsigned long offset;
838 	unsigned long agp_offset;
839 	int count;
840 	int order;
841 	int size;
842 	int alignment;
843 	int page_order;
844 	int total;
845 	int byte_count;
846 	int i;
847 	struct drm_buf **temp_buflist;
848 
849 	count = request->count;
850 	order = order_base_2(request->size);
851 	size = 1 << order;
852 
853 	alignment  = (request->flags & _DRM_PAGE_ALIGN)
854 	    ? round_page(size) : size;
855 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
856 	total = PAGE_SIZE << page_order;
857 
858 	byte_count = 0;
859 	agp_offset = request->agp_start;
860 
861 	DRM_DEBUG("count:      %d\n",  count);
862 	DRM_DEBUG("order:      %d\n",  order);
863 	DRM_DEBUG("size:       %d\n",  size);
864 	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
865 	DRM_DEBUG("alignment:  %d\n",  alignment);
866 	DRM_DEBUG("page_order: %d\n",  page_order);
867 	DRM_DEBUG("total:      %d\n",  total);
868 
869 	entry = &dma->bufs[order];
870 
871 	entry->buflist = kmalloc(count * sizeof(*entry->buflist), M_DRM,
872 				 M_WAITOK | M_NULLOK | M_ZERO);
873 	if (entry->buflist == NULL)
874 		return -ENOMEM;
875 
876 	entry->buf_size = size;
877 	entry->page_order = page_order;
878 
879 	offset = 0;
880 
881 	while (entry->buf_count < count) {
882 		buf          = &entry->buflist[entry->buf_count];
883 		buf->idx     = dma->buf_count + entry->buf_count;
884 		buf->total   = alignment;
885 		buf->order   = order;
886 		buf->used    = 0;
887 
888 		buf->offset  = (dma->byte_count + offset);
889 		buf->bus_address = agp_offset + offset;
890 		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
891 		buf->next    = NULL;
892 		buf->pending = 0;
893 		buf->file_priv = NULL;
894 
895 		buf->dev_priv_size = dev->driver->dev_priv_size;
896 		buf->dev_private = kmalloc(buf->dev_priv_size, M_DRM,
897 					   M_WAITOK | M_NULLOK | M_ZERO);
898 		if (buf->dev_private == NULL) {
899 			/* Set count correctly so we free the proper amount. */
900 			entry->buf_count = count;
901 			drm_cleanup_buf_error(dev, entry);
902 			return -ENOMEM;
903 		}
904 
905 		DRM_DEBUG("buffer %d @ %p\n",
906 		    entry->buf_count, buf->address);
907 
908 		offset += alignment;
909 		entry->buf_count++;
910 		byte_count += PAGE_SIZE << page_order;
911 	}
912 
913 	DRM_DEBUG("byte_count: %d\n", byte_count);
914 
915 	temp_buflist = krealloc(dma->buflist,
916 	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
917 	    M_DRM, M_WAITOK | M_NULLOK);
918 	if (temp_buflist == NULL) {
919 		/* Free the entry because it isn't valid */
920 		drm_cleanup_buf_error(dev, entry);
921 		return -ENOMEM;
922 	}
923 	dma->buflist = temp_buflist;
924 
925 	for (i = 0; i < entry->buf_count; i++) {
926 		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
927 	}
928 
929 	dma->buf_count += entry->buf_count;
930 	dma->byte_count += byte_count;
931 
932 	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
933 	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
934 
935 	request->count = entry->buf_count;
936 	request->size = size;
937 
938 	dma->flags = _DRM_DMA_USE_SG;
939 
940 	return 0;
941 }
942 
943 /**
944  * Add AGP buffers for DMA transfers.
945  *
946  * \param dev struct drm_device to which the buffers are to be added.
947  * \param request pointer to a struct drm_buf_desc describing the request.
948  * \return zero on success or a negative number on failure.
949  *
950  * After some sanity checks creates a drm_buf structure for each buffer and
951  * reallocates the buffer list of the same size order to accommodate the new
952  * buffers.
953  */
954 int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
955 {
956 	int order, ret;
957 
958 	if (request->count < 0 || request->count > 4096)
959 		return -EINVAL;
960 
961 	order = order_base_2(request->size);
962 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
963 		return -EINVAL;
964 
965 
966 	/* No more allocations after first buffer-using ioctl. */
967 	if (dev->buf_use != 0) {
968 		return -EBUSY;
969 	}
970 	/* No more than one allocation per order */
971 	if (dev->dma->bufs[order].buf_count != 0) {
972 		return -ENOMEM;
973 	}
974 
975 	ret = drm_do_addbufs_agp(dev, request);
976 
977 	return ret;
978 }
979 
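/*
 * Validate a scatter/gather addbufs request (root only, sane count and
 * order, no buffers of this order yet) and hand it to drm_do_addbufs_sg().
 */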
980 static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
981 {
982 	int order, ret;
983 
984 	if (!capable(CAP_SYS_ADMIN))
985 		return -EACCES;
986 
987 	if (request->count < 0 || request->count > 4096)
988 		return -EINVAL;
989 
990 	order = order_base_2(request->size);
991 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
992 		return -EINVAL;
993 
994 	spin_lock(&dev->buf_lock);
995 	if (dev->buf_use) {
996 		spin_unlock(&dev->buf_lock);
997 		return -EBUSY;
998 	}
999 	atomic_inc(&dev->buf_alloc);
1000 	spin_unlock(&dev->buf_lock);
1001 
1002 	/* No more than one allocation per order */
1003 	if (dev->dma->bufs[order].buf_count != 0) {
1004 		return -ENOMEM;
1005 	}
1006 
1007 	ret = drm_do_addbufs_sg(dev, request);
1008 
1009 	return ret;
1010 }
1011 
1012 int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
1013 {
1014 	int order, ret;
1015 
1016 	if (!capable(CAP_SYS_ADMIN))
1017 		return -EACCES;
1018 
1019 	if (request->count < 0 || request->count > 4096)
1020 		return -EINVAL;
1021 
1022 	order = order_base_2(request->size);
1023 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1024 		return -EINVAL;
1025 
1026 	spin_lock(&dev->buf_lock);
1027 	if (dev->buf_use) {
1028 		spin_unlock(&dev->buf_lock);
1029 		return -EBUSY;
1030 	}
1031 	atomic_inc(&dev->buf_alloc);
1032 	spin_unlock(&dev->buf_lock);
1033 
1034 	/* No more allocations after first buffer-using ioctl. */
1035 	if (dev->buf_use != 0) {
1036 		return -EBUSY;
1037 	}
1038 	/* No more than one allocation per order */
1039 	if (dev->dma->bufs[order].buf_count != 0) {
1040 		return -ENOMEM;
1041 	}
1042 
1043 	ret = drm_do_addbufs_pci(dev, request);
1044 
1045 	return ret;
1046 }
1047 
1048 /**
1049  * Add buffers for DMA transfers (ioctl).
1050  *
1051  * \param inode device inode.
1052  * \param file_priv DRM file private.
1053  * \param cmd command.
1054  * \param arg pointer to a struct drm_buf_desc request.
1055  * \return zero on success or a negative number on failure.
1056  *
1057  * According to the memory type specified in drm_buf_desc::flags and the
1058  * build options, it dispatches the call either to addbufs_agp(),
1059  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1060  * PCI memory respectively.
1061  */
1062 int drm_legacy_addbufs(struct drm_device *dev, void *data,
1063 		       struct drm_file *file_priv)
1064 {
1065 	struct drm_buf_desc *request = data;
1066 	int err;
1067 
1068 	if (request->flags & _DRM_AGP_BUFFER)
1069 		err = drm_legacy_addbufs_agp(dev, request);
1070 	else if (request->flags & _DRM_SG_BUFFER)
1071 		err = drm_legacy_addbufs_sg(dev, request);
1072 	else
1073 		err = drm_legacy_addbufs_pci(dev, request);
1074 
1075 	return err;
1076 }
1077 
1078 /**
1079  * Get information about the buffer mappings.
1080  *
1081  * This was originally meant for debugging purposes, or for use by a sophisticated
1082  * client library to determine how best to use the available buffers (e.g.,
1083  * large buffers can be used for image transfer).
1084  *
1085  * \param inode device inode.
1086  * \param file_priv DRM file private.
1087  * \param cmd command.
1088  * \param arg pointer to a drm_buf_info structure.
1089  * \return zero on success or a negative number on failure.
1090  *
1091  * Increments drm_device::buf_use while holding the drm_device::buf_lock
1092  * lock, preventing allocation of more buffers after this call. Information
1093  * about each requested buffer is then copied into user space.
1094  */
1095 int drm_legacy_infobufs(struct drm_device *dev, void *data,
1096 			struct drm_file *file_priv)
1097 {
1098 	struct drm_device_dma *dma = dev->dma;
1099 	struct drm_buf_info *request = data;
1100 	int i;
1101 	int count;
1102 
1103 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1104 		return -EINVAL;
1105 
1106 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1107 		return -EINVAL;
1108 
1109 	if (!dma)
1110 		return -EINVAL;
1111 
1112 	spin_lock(&dev->buf_lock);
1113 	if (atomic_read(&dev->buf_alloc)) {
1114 		spin_unlock(&dev->buf_lock);
1115 		return -EBUSY;
1116 	}
1117 	++dev->buf_use;		/* Can't allocate more after this call */
1118 	spin_unlock(&dev->buf_lock);
1119 
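	/* First count the orders that actually contain buffers; the per-order
	 * details are copied out below only if the caller supplied enough room. */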
1120 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1121 		if (dma->bufs[i].buf_count)
1122 			++count;
1123 	}
1124 
1125 	DRM_DEBUG("count = %d\n", count);
1126 
1127 	if (request->count >= count) {
1128 		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1129 			if (dma->bufs[i].buf_count) {
1130 				struct drm_buf_desc __user *to =
1131 				    &request->list[count];
1132 				struct drm_buf_entry *from = &dma->bufs[i];
1133 				if (copy_to_user(&to->count,
1134 						 &from->buf_count,
1135 						 sizeof(from->buf_count)) ||
1136 				    copy_to_user(&to->size,
1137 						 &from->buf_size,
1138 						 sizeof(from->buf_size)) ||
1139 				    copy_to_user(&to->low_mark,
1140 						 &from->low_mark,
1141 						 sizeof(from->low_mark)) ||
1142 				    copy_to_user(&to->high_mark,
1143 						 &from->high_mark,
1144 						 sizeof(from->high_mark)))
1145 					return -EFAULT;
1146 
1147 				DRM_DEBUG("%d %d %d %d %d\n",
1148 					  i,
1149 					  dma->bufs[i].buf_count,
1150 					  dma->bufs[i].buf_size,
1151 					  dma->bufs[i].low_mark,
1152 					  dma->bufs[i].high_mark);
1153 				++count;
1154 			}
1155 		}
1156 	}
1157 	request->count = count;
1158 
1159 	return 0;
1160 }
1161 
1162 /**
1163  * Specifies a low and high water mark for buffer allocation
1164  *
1165  * \param inode device inode.
1166  * \param file_priv DRM file private.
1167  * \param cmd command.
1168  * \param arg a pointer to a drm_buf_desc structure.
1169  * \return zero on success or a negative number on failure.
1170  *
1171  * Verifies that the size order is bounded between the admissible orders and
1172  * updates the respective drm_device_dma::bufs entry low and high water mark.
1173  *
1174  * \note This ioctl is deprecated and mostly never used.
1175  */
1176 int drm_legacy_markbufs(struct drm_device *dev, void *data,
1177 			struct drm_file *file_priv)
1178 {
1179 	struct drm_device_dma *dma = dev->dma;
1180 	struct drm_buf_desc *request = data;
1181 	int order;
1182 	struct drm_buf_entry *entry;
1183 
1184 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1185 		return -EINVAL;
1186 
1187 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1188 		return -EINVAL;
1189 
1190 	if (!dma)
1191 		return -EINVAL;
1192 
1193 	DRM_DEBUG("%d, %d, %d\n",
1194 		  request->size, request->low_mark, request->high_mark);
1195 	order = order_base_2(request->size);
1196 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1197 		return -EINVAL;
1198 	entry = &dma->bufs[order];
1199 
1200 	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1201 		return -EINVAL;
1202 	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1203 		return -EINVAL;
1204 
1205 	entry->low_mark = request->low_mark;
1206 	entry->high_mark = request->high_mark;
1207 
1208 	return 0;
1209 }
1210 
1211 /**
1212  * Unreserve the buffers in the list, previously reserved using drmDMA.
1213  *
1214  * \param inode device inode.
1215  * \param file_priv DRM file private.
1216  * \param cmd command.
1217  * \param arg pointer to a drm_buf_free structure.
1218  * \return zero on success or a negative number on failure.
1219  *
1220  * Calls free_buffer() for each used buffer.
1221  * This function is primarily used for debugging.
1222  */
1223 int drm_legacy_freebufs(struct drm_device *dev, void *data,
1224 			struct drm_file *file_priv)
1225 {
1226 	struct drm_device_dma *dma = dev->dma;
1227 	struct drm_buf_free *request = data;
1228 	int i;
1229 	int idx;
1230 	struct drm_buf *buf;
1231 	int retcode = 0;
1232 
1233 	DRM_DEBUG("%d\n", request->count);
1234 
1235 	for (i = 0; i < request->count; i++) {
1236 		if (copy_from_user(&idx, &request->list[i], sizeof(idx))) {
1237 			retcode = -EFAULT;
1238 			break;
1239 		}
1240 		if (idx < 0 || idx >= dma->buf_count) {
1241 			DRM_ERROR("Index %d (of %d max)\n",
1242 			    idx, dma->buf_count - 1);
1243 			retcode = -EINVAL;
1244 			break;
1245 		}
1246 		buf = dma->buflist[idx];
1247 		if (buf->file_priv != file_priv) {
1248 			DRM_ERROR("Process %d freeing buffer not owned\n",
1249 			    DRM_CURRENTPID);
1250 			retcode = -EINVAL;
1251 			break;
1252 		}
1253 		drm_legacy_free_buffer(dev, buf);
1254 	}
1255 
1256 	return retcode;
1257 }
1258 
1259 /**
1260  * Maps all of the DMA buffers into client-virtual space (ioctl).
1261  *
1262  * \param inode device inode.
1263  * \param file_priv DRM file private.
1264  * \param cmd command.
1265  * \param arg pointer to a drm_buf_map structure.
1266  * \return zero on success or a negative number on failure.
1267  *
1268  * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
1269  * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
1270  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1271  * drm_mmap_dma().
1272  */
1273 int drm_legacy_mapbufs(struct drm_device *dev, void *data,
1274 		       struct drm_file *file_priv)
1275 {
1276 	struct drm_device_dma *dma = dev->dma;
1277 	int retcode = 0;
1278 	const int zero = 0;
1279 	vm_offset_t address;
1280 	struct vmspace *vms;
1281 	vm_ooffset_t foff;
1282 	vm_size_t size;
1283 	vm_offset_t vaddr;
1284 	struct drm_buf_map *request = data;
1285 	int i;
1286 
1287 	vms = DRM_CURPROC->td_proc->p_vmspace;
1288 
1289 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1290 		return -EINVAL;
1291 
1292 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1293 		return -EINVAL;
1294 
1295 	if (!dma)
1296 		return -EINVAL;
1297 
1298 	spin_lock(&dev->buf_lock);
1299 	if (atomic_read(&dev->buf_alloc)) {
1300 		spin_unlock(&dev->buf_lock);
1301 		return -EBUSY;
1302 	}
1303 	dev->buf_use++;		/* Can't allocate more after this call */
1304 	spin_unlock(&dev->buf_lock);
1305 
1306 	if (request->count < dma->buf_count)
1307 		goto done;
1308 
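	/* AGP and SG buffers are mapped through dev->agp_buffer_map; plain PCI
	 * buffers use a zero offset, which drm_mmap() treats as a request to
	 * map the DMA buffer pool. */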
1309 	if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
1310 	    (drm_core_check_feature(dev, DRIVER_SG) &&
1311 	    (dma->flags & _DRM_DMA_USE_SG))) {
1312 		drm_local_map_t *map = dev->agp_buffer_map;
1313 
1314 		if (map == NULL) {
1315 			retcode = -EINVAL;
1316 			goto done;
1317 		}
1318 		size = round_page(map->size);
1319 		foff = (unsigned long)map->handle;
1320 	} else {
1321 		size = round_page(dma->byte_count),
1322 		foff = 0;
1323 	}
1324 
1325 	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1326 	retcode = -vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1327 	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
1328 	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
1329 	if (retcode)
1330 		goto done;
1331 
1332 	request->virtual = (void *)vaddr;
1333 
1334 	for (i = 0; i < dma->buf_count; i++) {
1335 		if (copy_to_user(&request->list[i].idx,
1336 		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1337 			retcode = -EFAULT;
1338 			goto done;
1339 		}
1340 		if (copy_to_user(&request->list[i].total,
1341 		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1342 			retcode = -EFAULT;
1343 			goto done;
1344 		}
1345 		if (copy_to_user(&request->list[i].used, &zero,
1346 		    sizeof(zero))) {
1347 			retcode = -EFAULT;
1348 			goto done;
1349 		}
1350 		address = vaddr + dma->buflist[i]->offset; /* *** */
1351 		if (copy_to_user(&request->list[i].address, &address,
1352 		    sizeof(address))) {
1353 			retcode = -EFAULT;
1354 			goto done;
1355 		}
1356 	}
1357       done:
1358 	request->count = dma->buf_count;
1359 	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
1360 
1361 	return retcode;
1362 }
1363 
1364 int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
1365 		  struct drm_file *file_priv)
1366 {
1367 	if (drm_core_check_feature(dev, DRIVER_MODESET))
1368 		return -EINVAL;
1369 
1370 	if (dev->driver->dma_ioctl)
1371 		return dev->driver->dma_ioctl(dev, data, file_priv);
1372 	else
1373 		return -EINVAL;
1374 }
1375 
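/*
 * Return the shared-memory map that contains the hardware lock (the SAREA),
 * or NULL if no such map exists.
 */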
1376 struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
1377 {
1378 	struct drm_map_list *entry;
1379 
1380 	list_for_each_entry(entry, &dev->maplist, head) {
1381 		if (entry->map && entry->map->type == _DRM_SHM &&
1382 		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
1383 			return entry->map;
1384 		}
1385 	}
1386 	return NULL;
1387 }
1388 EXPORT_SYMBOL(drm_legacy_getsarea);
1389