xref: /openbsd-src/sys/dev/pci/drm/drm_bufs.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include <sys/types.h>
#include <dev/pci/pcireg.h>

#include "drmP.h"

int	drm_addbufs_pci(struct drm_device *, struct drm_buf_desc *);
int	drm_addbufs_sg(struct drm_device *, struct drm_buf_desc *);
int	drm_addbufs_agp(struct drm_device *, struct drm_buf_desc *);

/*
 * Compute the order of a size: the base-2 logarithm, rounded up so that
 * 1 << order covers the size.  Can be made faster.
 */
int
drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	if (size & ~(1UL << order))
		++order;

	return order;
}
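
/*
 * For example, drm_order(4096) is 12 and drm_order(12288) is 14,
 * since 1 << 14 is the smallest power of two >= 12KB.
 */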

struct drm_local_map *
drm_core_findmap(struct drm_device *dev, unsigned long offset)
{
	struct drm_local_map	*map;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (offset == map->ext)
			break;
	}
	DRM_UNLOCK();
	return (map);
}
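
/*
 * Illustrative driver-side use (not from this file): a driver looks an
 * offset handed in by userspace back up as a map, e.g.
 *
 *	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
 *
 * Note that the offset matched against is the extent handle stored in
 * map->ext by drm_addmap(), not a physical address.
 */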

int
drm_addmap(struct drm_device *dev, unsigned long offset, unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags,
    struct drm_local_map **map_ptr)
{
	struct drm_local_map	*map;
	int			 align, ret = 0;
#if 0 /* disabled for now */
	struct drm_agp_mem	*entry;
	int			 valid;
#endif

	/* Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/*
	 * If this is just another reference to an existing kernel-allocated
	 * map, hand the existing map back instead of creating a new one.
	 */
	DRM_LOCK();
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = drm_calloc(1, sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

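	/*
	 * Allocate an opaque handle for the map from the device's extent.
	 * This handle is what gets returned to userspace and what
	 * drm_core_findmap() later matches against map->ext, rather than
	 * a raw kernel address.
	 */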
	DRM_LOCK();
	ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
	    0, EX_NOWAIT, &map->ext);
	if (ret) {
		DRM_ERROR("can't find free offset\n");
		DRM_UNLOCK();
		drm_free(map);
		return (ret);
	}
	DRM_UNLOCK();

	switch (map->type) {
	case _DRM_REGISTERS:
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
#if 0 /* disabled for now */
		/*
		 * If userspace is in control of the AGP aperture (as with
		 * some Intel drivers), the memory list is empty and this
		 * check is skipped.
		 */
		DRM_LOCK();
		TAILQ_FOREACH(entry, &dev->agp->memory, link) {
			DRM_DEBUG("bound = 0x%lx, pages = %d, %ld\n",
			    entry->bound, entry->pages,
			    (long)entry->pages * PAGE_SIZE);
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
			DRM_UNLOCK();
			drm_free(map);
			DRM_ERROR("invalid agp map requested\n");
			return (EACCES);
		}
		DRM_UNLOCK();
#endif
		break;
	case _DRM_SCATTER_GATHER:
		if (dev->sg == NULL) {
			DRM_LOCK();
			extent_free(dev->handle_ext, map->ext, map->size,
			    EX_NOWAIT);
			DRM_UNLOCK();
			drm_free(map);
			return (EINVAL);
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/*
		 * Unfortunately, we don't get any alignment specification
		 * from the caller, so we have to guess: align the bus
		 * address of the map to its size if the size is a power of
		 * two, otherwise fall back to PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
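		/*
		 * For example, a 64KB map gets 64KB alignment, while a
		 * 12KB map (not a power of two) falls back to PAGE_SIZE.
		 */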
		map->dmamem = drm_dmamem_alloc(dev->dmat, map->size, align,
		    1, map->size, 0, 0);
		if (map->dmamem == NULL) {
			DRM_LOCK();
			extent_free(dev->handle_ext, map->ext, map->size,
			    EX_NOWAIT);
			DRM_UNLOCK();
			drm_free(map);
			return (ENOMEM);
		}
		map->handle = map->dmamem->kva;
		map->offset = map->dmamem->map->dm_segs[0].ds_addr;
		if (map->type == _DRM_SHM && map->flags & _DRM_CONTAINS_LOCK) {
			DRM_LOCK();
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				extent_free(dev->handle_ext, map->ext,
				    map->size, EX_NOWAIT);
				DRM_UNLOCK();
				drm_dmamem_free(dev->dmat, map->dmamem);
				drm_free(map);
				return (EBUSY);
			}
			dev->lock.hw_lock = map->handle;
			DRM_UNLOCK();
		}
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		DRM_LOCK();
		extent_free(dev->handle_ext, map->ext, map->size,
		    EX_NOWAIT);
		DRM_UNLOCK();
		drm_free(map);
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
done:
	DRM_UNLOCK();

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
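
/*
 * Illustrative call (names hypothetical, not from this file): a driver
 * attach path could register its register BAR along these lines:
 *
 *	struct drm_local_map *map;
 *	int error = drm_addmap(dev, regs_base, regs_size,
 *	    _DRM_REGISTERS, 0, &map);
 *
 * where regs_base and regs_size must be page aligned, as checked at
 * the top of drm_addmap().
 */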

int
drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_map		*request = data;
	struct drm_local_map	*map;
	int			 err;

	if (!(file_priv->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->ext;

	return 0;
}

void
drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	DRM_LOCK();
	drm_rmmap_locked(dev, map);
	DRM_UNLOCK();
}

void
drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_AGP:
		/* FALLTHROUGH */
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_SHM:
		/* FALLTHROUGH */
	case _DRM_CONSISTENT:
		drm_dmamem_free(dev->dmat, map->dmamem);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	/* NOCOALESCE set, can't fail */
	extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);

	drm_free(map);
}

/* Userspace interface to remove a map from the list and deallocate its
 * resources; only maps created with the _DRM_REMOVABLE flag may be
 * removed this way.
 */

int
drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_local_map	*map;
	struct drm_map		*request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return (EINVAL);
	}

	drm_rmmap_locked(dev, map);

	DRM_UNLOCK();

	return 0;
}

/*
 * DMA buffers API.
 *
 * The implementation used to be significantly more complicated, but the
 * complexity has been moved into the drivers as different buffer management
 * schemes evolved.
 *
 * This API is going to die eventually.
 */

int
drm_dma_setup(struct drm_device *dev)
{
	dev->dma = drm_calloc(1, sizeof(*dev->dma));
	if (dev->dma == NULL)
		return (ENOMEM);

	rw_init(&dev->dma->dma_lock, "drmdma");

	return (0);
}

void
drm_cleanup_buf(struct drm_device *dev, struct drm_buf_entry *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++)
			drm_dmamem_free(dev->dmat, entry->seglist[i]);
		drm_free(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			drm_free(entry->buflist[i].dev_private);
		}
		drm_free(entry->buflist);

		entry->buf_count = 0;
	}
}

void
drm_dma_takedown(struct drm_device *dev)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 i;

	if (dma == NULL)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++)
		drm_cleanup_buf(dev, &dma->bufs[i]);

	drm_free(dma->buflist);
	drm_free(dma->pagelist);
	drm_free(dev->dma);
	dev->dma = NULL;
}

void
drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
{
	if (buf == NULL)
		return;

	buf->pending = 0;
	buf->file_priv = NULL;
	buf->used = 0;
}

void
drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 i;

	if (dma == NULL)
		return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->file_priv == file_priv)
			drm_free_buffer(dev, dma->buflist[i]);
	}
}

/* Call into the driver-specific DMA handler */
int
drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_dma		*d = data;
	int			 ret = 0;

	if (dev->driver->dma_ioctl == NULL) {
		DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
		return (EINVAL);
	}

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers. */
	if (d->send_count != 0) {
		DRM_ERROR("process trying to send %d buffers via drmDMA\n",
		    d->send_count);
		return (EINVAL);
	}

	/* We'll send you buffers. */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
		    curproc->p_pid, d->request_count, dma->buf_count);
		return (EINVAL);
	}
	d->granted_count = 0;

	if (d->request_count)
		ret = dev->driver->dma_ioctl(dev, d, file_priv);
	return (ret);
}

int
drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_entry	*entry;
	struct drm_buf		*buf, **temp_buflist;
	unsigned long		 agp_offset, offset;
	int			 alignment, count, order, page_order, size;
	int			 total, byte_count, i;
#if 0 /* disabled for now */
	struct drm_agp_mem	*agp_entry;
	int			 valid;
#endif

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
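
	/*
	 * Worked example: for request->size = 64KB, order is 16 and
	 * size = 1 << 16 = 64KB; with 4KB pages (PAGE_SHIFT = 12),
	 * page_order = 4 and total = PAGE_SIZE << 4 = 64KB.
	 */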

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */

	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
#if 0 /* disabled for now */
	valid = 0;
	DRM_LOCK();
	TAILQ_FOREACH(agp_entry, &dev->agp->memory, link) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!TAILQ_EMPTY(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		DRM_UNLOCK();
		return (EINVAL);
	}
	DRM_UNLOCK();
#endif

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver->buf_priv_size);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	/* The kernel has no realloc, so drm_realloc() must be told the old
	 * size in order to copy the existing entries over.
	 */
	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

int
drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf		*buf, **temp_buflist;
	struct drm_buf_entry	*entry;
	int			 alignment, byte_count, count, i, order;
	int			 page_count, page_order, size, total;
	unsigned long		 offset, *temp_pagelist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	entry->seglist = drm_calloc(count, sizeof(*entry->seglist));

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = drm_calloc((dma->page_count + (count << page_order)),
	    sizeof(*dma->pagelist));

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist);
		drm_free(entry->seglist);
		drm_free(entry->buflist);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		struct drm_dmamem *mem = drm_dmamem_alloc(dev->dmat, size,
		    alignment, 1, size, 0, 0);
		if (mem == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf(dev, entry);
			drm_free(temp_pagelist);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = mem;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n", dma->page_count +
			    page_count, mem->kva + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)mem->kva + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = mem->kva + offset;
			buf->bus_address = mem->map->dm_segs[0].ds_addr +
			    offset;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_private = drm_calloc(1,
			    dev->driver->buf_priv_size);
			if (buf->dev_private == NULL) {
				/* Set count so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf(dev, entry);
				drm_free(temp_pagelist);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d\n", entry->buf_count);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		drm_free(temp_pagelist);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	/* No allocations failed, so now we can replace the original
	 * pagelist with the new one.
	 */
	drm_free(dma->pagelist);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

int
drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_entry	*entry;
	struct drm_buf		*buf, **temp_buflist;
	unsigned long		 agp_offset, offset;
	int			 alignment, byte_count, count, i, order;
	int			 page_order, size, total;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = drm_calloc(count, sizeof(*entry->buflist));
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_private = drm_calloc(1, dev->driver->buf_priv_size);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d\n", entry->buf_count);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
	    dma->buf_count * sizeof(*dma->buflist),
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist));
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++)
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int
drm_addbufs(struct drm_device *dev, struct drm_buf_desc *request)
{
	struct drm_device_dma	*dma = dev->dma;
	int			 order, ret;

	if (request->count < 0 || request->count > 4096)
		return (EINVAL);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return (EINVAL);

	rw_enter_write(&dma->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dma->buf_use != 0) {
		rw_exit_write(&dma->dma_lock);
		return (EBUSY);
	}
	/* No more than one allocation per order */
	if (dma->bufs[order].buf_count != 0) {
		rw_exit_write(&dma->dma_lock);
		return (ENOMEM);
	}

	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else
		ret = drm_addbufs_pci(dev, request);

	rw_exit_write(&dma->dma_lock);

	return (ret);
}
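
/*
 * Illustrative userspace path (libdrm, not part of this file):
 *
 *	drmAddBufs(fd, count, size, DRM_AGP_BUFFER, agp_offset);
 *
 * reaches drm_addbufs() via the DRM_IOCTL_ADD_BUFS ioctl, with the
 * arguments carried in a struct drm_buf_desc.
 */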

int
drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_free	*request = data;
	struct drm_buf		*buf;
	int			 i, idx, retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	rw_enter_write(&dma->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n", idx,
			    dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	rw_exit_write(&dma->dma_lock);

	return retcode;
}
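
/*
 * Userspace counterpart (libdrm, illustrative): drmFreeBufs(fd, count,
 * list) fills the struct drm_buf_free with an array of buffer indices,
 * which the loop above copies in and releases one at a time.
 */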

int
drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma	*dma = dev->dma;
	struct drm_buf_map	*request = data;
	struct vmspace		*vms;
	struct vnode		*vn;
	vaddr_t			 address, vaddr;
	voff_t			 foff;
	vsize_t			 size;
	const int		 zero = 0;
	int			 i, retcode = 0;

	if (!vfinddev(file_priv->kdev, VCHR, &vn))
		return EINVAL;

	vms = curproc->p_vmspace;

	rw_enter_write(&dma->dma_lock);
	dev->dma->buf_use++;	/* Can't allocate more after this call */
	rw_exit_write(&dma->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((dev->driver->flags & DRIVER_AGP &&
	    (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->driver->flags & DRIVER_SG &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		struct drm_local_map *map = dev->agp_buffer_map;

		if (map == NULL) {
			DRM_DEBUG("couldn't find agp buffer map\n");
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->ext;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    (caddr_t)vn, foff, curproc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur,
	    curproc);
	if (retcode) {
		DRM_DEBUG("uvm_mmap failed\n");
		goto done;
	}

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
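
/*
 * Userspace counterpart (libdrm, illustrative): drmMapBufs(fd) issues
 * DRM_IOCTL_MAP_BUFS and gets back the idx/total/used/address records
 * filled in by the copy-out loop above.
 */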
1006