xref: /openbsd-src/sys/dev/pci/drm/drm_bufs.c (revision 850e275390052b330d93020bf619a739a3c277ac)
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "sys/types.h"
#include "dev/pci/pcireg.h"

#include "drmP.h"

int	drm_alloc_resource(struct drm_device *, int);
int	drm_do_addbufs_agp(struct drm_device *, drm_buf_desc_t *);
int	drm_do_addbufs_pci(struct drm_device *, drm_buf_desc_t *);
int	drm_do_addbufs_sg(struct drm_device *, drm_buf_desc_t *);
/*
 * Compute the binary order (log2, rounded up) of size.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1UL << order))
		++order;

	return order;
}
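
/*
 * Example, assuming 4k pages: drm_order(4096) == 12, since 1 << 12 == 4096.
 * Sizes that are not a power of two round up, so drm_order(4097) == 13;
 * 1 << drm_order(size) is therefore always >= size.
 */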

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that the resources are not mapped at this point,
 * so there is no virtual address for accessing them.  Cleaned up at unload.
 */
int
drm_alloc_resource(struct drm_device *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL)
		return 0;

	dev->pcir[resource] = vga_pci_bar_info(dev->vga_softc, resource);
	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Can't get bar info for resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}

unsigned long
drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return dev->pcir[resource]->base;
}

unsigned long
drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return dev->pcir[resource]->maxsize;
}
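
/*
 * Usage sketch (illustrative; the BAR index is hypothetical): a driver
 * attach routine could locate a register BAR like so:
 *
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len = drm_get_resource_len(dev, 0);
 *
 *	if (base == 0 || len == 0)
 *		the BAR is not present; bail out.
 */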

int
drm_addmap(struct drm_device * dev, unsigned long offset, unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align, ret = 0;
#if 0 /* disabled for now */
	struct drm_agp_mem *entry;
	int valid;
#endif

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/*
	 * Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	DRM_LOCK();
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = drm_calloc(1, sizeof(*map), DRM_MEM_MAPS);
	if (map == NULL)
		return ENOMEM;

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	DRM_LOCK();
	ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
	    0, EX_NOWAIT, &map->ext);
	if (ret) {
		DRM_ERROR("can't find free offset\n");
		DRM_UNLOCK();
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return (ret);
	}
	DRM_UNLOCK();

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (map->handle == NULL) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
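		/*
		 * E.g. (hypothetical addresses): with dev->agp->base at
		 * 0xd0000000, an aperture-relative offset of 0x100000 is
		 * rebased to 0xd0100000, while an offset already inside
		 * the aperture is left untouched.
		 */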
		map->mtrr   = dev->agp->mtrr; /* for getmap */
#if 0 /* disabled for now */
		/*
		 * If AGP is under userspace control (as with some Intel
		 * drivers), skip this loop.
		 */
		DRM_LOCK();
		TAILQ_FOREACH(entry, &dev->agp->memory, link) {
			DRM_DEBUG("bound = %p, pages = %p, %p\n",
			    entry->bound, entry->pages,
			    entry->pages * PAGE_SIZE);
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
			DRM_UNLOCK();
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			DRM_ERROR("invalid agp map requested\n");
			return (EACCES);
		}
		DRM_UNLOCK();
#endif
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return (EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
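		/*
		 * E.g. a 64k map gets 64k alignment (65536 is a power of
		 * two), while a 24k map falls back to PAGE_SIZE alignment
		 * since 24576 & 24575 != 0.
		 */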
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return (ENOMEM);
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		if (map->type == _DRM_SHM && map->flags & _DRM_CONTAINS_LOCK) {
			DRM_LOCK();
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				drm_pci_free(dev, map->dmah);
				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle;
			DRM_UNLOCK();
		}
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
done:
	DRM_UNLOCK();

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
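
/*
 * Usage sketch (illustrative; the BAR index and flags are hypothetical):
 * a driver would typically map its registers at attach time along these
 * lines:
 *
 *	drm_local_map_t *map;
 *	int error;
 *
 *	error = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, &map);
 *	if (error != 0)
 *		return (error);
 */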

int
drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_map		*request = data;
	drm_local_map_t		*map;
	int			 err;

	if (!(file_priv->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	/* Hand userland the extent-based handle, not the kernel address. */
	request->handle = (void *)map->ext;

	return 0;
}

void
drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_LOCK();
	drm_rmmap_locked(dev, map);
	DRM_UNLOCK();
}

void
drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	/* NOCOALESCE set, can't fail */
	extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);

	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
}

/* Remove a map from the list and deallocate its resources; only maps
 * marked _DRM_REMOVABLE may be removed this way.
 */
int
drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_local_map_t	*map;
	struct drm_map	*request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap_locked(dev, map);

	DRM_UNLOCK();

	return 0;
}

int
drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;
#if 0 /* disabled for now */
	struct drm_agp_mem *agp_entry;
	int valid;
#endif

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
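
	/*
	 * Worked example, assuming 4k pages (PAGE_SHIFT == 12): a request
	 * for 16384-byte buffers gives order 14 and size 16384; with
	 * _DRM_PAGE_ALIGN, alignment = round_page(16384) = 16384;
	 * page_order = 14 - 12 = 2, so total = PAGE_SIZE << 2 = 16384.
	 */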

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */

	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
#if 0 /* disabled for now */
	valid = 0;
	DRM_LOCK();
	TAILQ_FOREACH(agp_entry, &dev->agp->memory, link) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		DRM_UNLOCK();
		return (EINVAL);
	}
	DRM_UNLOCK();
#endif

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver.buf_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* OpenBSD lacks realloc in the kernel; drm_realloc() allocates a new
	 * buffer and copies the old contents over.
	 */
	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

int
drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist),
	    DRM_MEM_BUFS);
	entry->seglist = drm_calloc(count, sizeof(*entry->seglist),
	    DRM_MEM_BUFS);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_calloc((dma->page_count + (count << page_order)),
	    sizeof(*dma->pagelist), DRM_MEM_BUFS);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist, (dma->page_count + (count <<
		    page_order)) * sizeof(*dma->pagelist), DRM_MEM_BUFS);
		drm_free(entry->seglist, count * sizeof(*entry->seglist),
		    DRM_MEM_BUFS);
		drm_free(entry->buflist, count * sizeof(*entry->buflist),
		    DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size	= size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf(dev, entry);
			drm_free(temp_pagelist, (dma->page_count +
			   (count << page_order)) * sizeof(*dma->pagelist),
			   DRM_MEM_BUFS);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_private = drm_calloc(1,
			    dev->driver.buf_priv_size, DRM_MEM_BUFS);
			if (buf->dev_private == NULL) {
				/* Set count so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf(dev, entry);
				drm_free(temp_pagelist, (dma->page_count +
				    (count << page_order)) *
				    sizeof(*dma->pagelist),
				    DRM_MEM_BUFS);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d\n",
			    entry->buf_count);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		drm_free(temp_pagelist, (dma->page_count +
		    (count << page_order)) * sizeof(*dma->pagelist),
		    DRM_MEM_BUFS);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist, dma->page_count * sizeof(*dma->pagelist),
	    DRM_MEM_BUFS);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

int
drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist),
	    DRM_MEM_BUFS);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver.buf_priv_size,
		    DRM_MEM_BUFS);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d\n", entry->buf_count);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int
drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int
drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int
drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int
drm_addbufs_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

int
drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t	*dma = dev->dma;
	struct drm_buf_free	*request = data;
	drm_buf_t		*buf;
	int			 i, idx, retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n", idx,
			    dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

int
drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct vmspace *vms;
	struct vnode *vn;
	vaddr_t address;
	voff_t foff;
	vsize_t size;
	vaddr_t vaddr;
	int retcode = 0;
	const int zero = 0;

	struct drm_buf_map *request = data;
	int i;

	if (!vfinddev(file_priv->kdev, VCHR, &vn))
		return EINVAL;

	vms = DRM_CURPROC->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			DRM_DEBUG("couldn't find agp buffer map\n");
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->ext;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    (caddr_t)vn, foff, DRM_CURPROC->p_rlimit[RLIMIT_MEMLOCK].rlim_cur,
	    DRM_CURPROC);
	if (retcode) {
		DRM_DEBUG("uvm_mmap failed\n");
		goto done;
	}

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
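
/*
 * Example (sketch, not compiled in): the userland side of drm_mapbufs(),
 * using the classic drm_buf_map ioctl interface.  The fd and the buffer
 * count are assumed to come from earlier setup.
 */
#if 0
	struct drm_buf_map bm;

	bm.count = buf_count;
	bm.virtual = NULL;
	bm.list = calloc(bm.count, sizeof(*bm.list));
	if (bm.list != NULL && ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0) {
		/*
		 * bm.virtual is the base of the shared mapping; each
		 * bm.list[i].address points at buffer i within it.
		 */
	}
#endif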
1028