/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include <sys/conf.h>
#include <bus/pci/pcireg.h>

#include <drm/drmP.h>

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_LOCK_ASSERT(dev);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	DRM_UNLOCK(dev);
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK(dev);
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

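/*
 * Add a mapping of the given type to the device's map list and return it via
 * map_ptr.  Expected to be called with the DRM lock held (as
 * drm_addmap_ioctl() below does); the lock is dropped and retaken internally
 * around the allocations.
 *
 * A minimal usage sketch, assuming a hypothetical driver mapping its register
 * BAR (the BAR index and variable names here are illustrative only):
 *
 *	unsigned long base = drm_get_resource_start(dev, 1);
 *	unsigned long len  = drm_get_resource_len(dev, 1);
 *	drm_local_map_t *regs;
 *	int ret;
 *
 *	DRM_LOCK(dev);
 *	ret = drm_addmap(dev, base, len, _DRM_REGISTERS, 0, &regs);
 *	DRM_UNLOCK(dev);
 *
 * drm_get_resource_start()/drm_get_resource_len() return 0 if the underlying
 * PCI resource could not be allocated.
 */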
int drm_addmap(struct drm_device *dev, unsigned long offset,
	       unsigned long size, enum drm_map_type type,
	       enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK(dev);

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = kmalloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK(dev);
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
	    DRM_MAP_HANDLE_SHIFT);

	switch (map->type) {
	case _DRM_REGISTERS:
		map->virtual = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->virtual = kmalloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->virtual);
		if (!map->virtual) {
			drm_free(map, DRM_MEM_MAPS);
			DRM_LOCK(dev);
			return ENOMEM;
		}
		map->offset = (unsigned long)map->virtual;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK(dev);
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK(dev);
				drm_free(map->virtual, DRM_MEM_MAPS);
				drm_free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
			DRM_UNLOCK(dev);
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr   = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			drm_free(map, DRM_MEM_MAPS);
			DRM_LOCK(dev);
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, DRM_MEM_MAPS);
			DRM_LOCK(dev);
			return EINVAL;
		}
		map->virtual = (void *)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			drm_free(map, DRM_MEM_MAPS);
			DRM_LOCK(dev);
			return ENOMEM;
		}
		map->virtual = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		drm_free(map, DRM_MEM_MAPS);
		DRM_LOCK(dev);
		return EINVAL;
	}

	DRM_LOCK(dev);
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK(dev);
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK(dev);
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr   = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}

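/*
 * Remove a map from the device's map list and release its backing resources
 * (I/O mapping, MTRR, kernel memory, or consistent DMA memory, depending on
 * the map type).  Must be called with the DRM lock held; the lock is dropped
 * briefly while the map handle is returned to the unit-number allocator.
 */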
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_LOCK_ASSERT(dev);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		drm_free(map->virtual, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	DRM_UNLOCK(dev);
	if (map->handle)
		free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
		    DRM_MAP_HANDLE_SHIFT);
	DRM_LOCK(dev);

	drm_free(map, DRM_MEM_MAPS);
}

/* Remove a map from the map list and deallocate its resources.  Only maps
 * created with the _DRM_REMOVABLE flag can be removed through this ioctl.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK(dev);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK(dev);
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK(dev);

	return 0;
}

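/*
 * Undo a partially completed buffer allocation: free any DMA segments and
 * per-buffer private data already allocated for this buffer entry, then
 * reset its counts.  Callers set seg_count/buf_count before calling so the
 * right number of elements is freed.
 */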
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		drm_free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			drm_free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		drm_free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

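/*
 * Worker for drm_addbufs_agp(): carve request->count buffers of the requested
 * size out of AGP space starting at request->agp_start and append them to the
 * device's buffer list.  Called with dev->dma_lock held.
 */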
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

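/*
 * Worker for drm_addbufs_pci(): allocate request->count buffers backed by
 * consistent PCI DMA memory and append them to the device's buffer list and
 * pagelist.  Called with dev->dma_lock held; the lock is dropped around each
 * drm_pci_alloc() call.
 */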
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = kmalloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		drm_free(entry->seglist, DRM_MEM_SEGS);
		drm_free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		spin_unlock(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		spin_lock(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = kmalloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	drm_free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

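/*
 * Worker for drm_addbufs_sg(): create request->count buffers that point into
 * the device's scatter/gather area at request->agp_start and append them to
 * the device's buffer list.  Called with dev->dma_lock held.
 */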
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = kmalloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = kmalloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = krealloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

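/*
 * drm_addbufs_agp()/drm_addbufs_sg()/drm_addbufs_pci() validate the request,
 * take dev->dma_lock, refuse new allocations once a buffer-using ioctl has
 * run or once the order already has buffers, and then call the matching
 * worker above.
 */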
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	spin_lock(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		spin_unlock(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		spin_unlock(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	spin_unlock(&dev->dma_lock);

	return ret;
}

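/*
 * DRM_IOCTL_ADD_BUFS handler: dispatch to the AGP, scatter/gather, or PCI
 * allocator based on the request flags.
 */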
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

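/*
 * DRM_IOCTL_INFO_BUFS handler: copy a descriptor for each non-empty buffer
 * order out to user space.  Marks the DMA buffers as in use, so no further
 * allocations are allowed afterwards.
 */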
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	spin_lock(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

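/*
 * DRM_IOCTL_MARK_BUFS handler: set the freelist low/high water marks for the
 * buffer order matching the requested size.
 */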
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	spin_lock(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		spin_unlock(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	spin_unlock(&dev->dma_lock);

	return 0;
}

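/*
 * DRM_IOCTL_FREE_BUFS handler: release each buffer whose index is listed in
 * the request back to the DMA buffer pool, verifying that the caller owns it.
 */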
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	spin_lock(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	spin_unlock(&dev->dma_lock);

	return retcode;
}

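/*
 * DRM_IOCTL_MAP_BUFS handler: map the DMA buffer pool into the calling
 * process's address space with a single mmap of the device node (only when
 * the request can describe every buffer), then copy each buffer's index,
 * size, and mapped user address out to user space.  Marks the DMA buffers
 * as in use.
 */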
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	spin_lock(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute order.  Can be made faster.
 */
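/*
 * drm_order(size) returns the smallest n with (1 << n) >= size, e.g.
 * drm_order(1) == 0, drm_order(PAGE_SIZE) == PAGE_SHIFT, and
 * drm_order(PAGE_SIZE + 1) == PAGE_SHIFT + 1.  drm_order(0) returns 0.
 */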
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}