/* $OpenBSD: agp.c,v 1.34 2010/12/26 15:41:00 miod Exp $ */
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/agpio.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>

#include <uvm/uvm.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>

/*
 * The enable and {alloc, free, bind, unbind} memory routines have default
 * fallbacks; the wrapper functions below use the chipset method when one is
 * provided and otherwise fall back to the generic implementation.  Callbacks
 * with no fallback are called directly.  This is mostly a workaround for the
 * weirdness of Intel integrated graphics, which is not technically a true
 * AGP chipset but provides an almost identical interface.
 */
int	agp_generic_enable(struct agp_softc *, u_int32_t);
struct agp_memory *
	agp_generic_alloc_memory(struct agp_softc *, int, vsize_t size);
int	agp_generic_free_memory(struct agp_softc *, struct agp_memory *);
void	agp_attach(struct device *, struct device *, void *);
int	agp_probe(struct device *, void *, void *);
int	agpbusprint(void *, const char *);
paddr_t	agpmmap(void *, off_t, int);
int	agpioctl(dev_t, u_long, caddr_t, int, struct proc *);
int	agpopen(dev_t, int, int, struct proc *);
int	agpclose(dev_t, int, int, struct proc *);

struct agp_memory *agp_find_memory(struct agp_softc *sc, int id);
/* userland ioctl functions */
int	agpvga_match(struct pci_attach_args *);
int	agp_info_user(void *, agp_info *);
int	agp_setup_user(void *, agp_setup *);
int	agp_allocate_user(void *, agp_allocate *);
int	agp_deallocate_user(void *, int);
int	agp_bind_user(void *, agp_bind *);
int	agp_unbind_user(void *, agp_unbind *);
int	agp_acquire_helper(void *dev, enum agp_acquire_state state);
int	agp_release_helper(void *dev, enum agp_acquire_state state);

int
agpdev_print(void *aux, const char *pnp)
{
	if (pnp) {
		printf("agp at %s", pnp);
	}
	return (UNCONF);
}

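/*
 * Used by AGP-capable host bridge drivers at autoconf time: returns 1 if
 * the attach args describe an "agp" attachment on a PCI host bridge.
 */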
89 int
90 agpbus_probe(struct agp_attach_args *aa)
91 {
92 	struct pci_attach_args	*pa = aa->aa_pa;
93 
94 	if (strncmp(aa->aa_busname, "agp", 3) == 0 &&
95 	    PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
96 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
97 		return (1);
98 	return (0);
99 }
100 
101 /*
102  * Find the video card hanging off the agp bus XXX assumes only one bus
103  */
104 int
105 agpvga_match(struct pci_attach_args *pa)
106 {
107 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
108 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA) {
109 		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
110 		    NULL, NULL))
111 			return (1);
112 	}
113 	return (0);
114 }
115 
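/*
 * Entry point for the chipset-specific AGP drivers: once such a driver has
 * located the aperture, it passes its method table and the aperture
 * geometry here and the generic agp(4) device is attached as a child via
 * config_found().  An illustrative call from a chipset driver's attach
 * routine (the softc member and method table names are hypothetical):
 *
 *	csc->csc_agpdev = agp_attach_bus(pa, &mychip_methods,
 *	    apaddr, apsize, &csc->csc_dev);
 */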
struct device *
agp_attach_bus(struct pci_attach_args *pa, const struct agp_methods *methods,
    bus_addr_t apaddr, bus_size_t apsize, struct device *dev)
{
	struct agpbus_attach_args arg;

	arg.aa_methods = methods;
	arg.aa_pa = pa;
	arg.aa_apaddr = apaddr;
	arg.aa_apsize = apsize;

	printf("\n"); /* newline from the driver that called us */
	return (config_found(dev, &arg, agpdev_print));
}

int
agp_probe(struct device *parent, void *match, void *aux)
{
	/*
	 * No checking is done here; the driver we are attaching this
	 * interface to should already have done it.
	 */
	return (1);
}

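/*
 * Generic attachment: record the chipset driver's methods and aperture,
 * derive an upper bound on AGP memory from the amount of physical RAM,
 * and locate the bridge's AGP capability for later mode negotiation.
 */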
void
agp_attach(struct device *parent, struct device *self, void *aux)
{
	static const int agp_max[][2] = {
		{0,		0},
		{32,		4},
		{64,		28},
		{128,		96},
		{256,		204},
		{512,		440},
		{1024,		942},
		{2048,		1920},
		{4096,		3932}
	};
	struct agpbus_attach_args *aa = aux;
	struct pci_attach_args *pa = aa->aa_pa;
	struct agp_softc *sc = (struct agp_softc *)self;
	u_int memsize;
	int i;

	sc->sc_chipc = parent;
	sc->sc_methods = aa->aa_methods;
	sc->sc_apaddr = aa->aa_apaddr;
	sc->sc_apsize = aa->aa_apsize;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(physmem) >> 20;

	for (i = 0; i < nitems(agp_max) && memsize > agp_max[i][0]; i++)
		;
	if (i == nitems(agp_max))
		i = nitems(agp_max) - 1;
	sc->sc_maxmem = agp_max[i][1] << 20;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	rw_init(&sc->sc_lock, "agplk");

	TAILQ_INIT(&sc->sc_memory);

	sc->sc_pcitag = pa->pa_tag;
	sc->sc_pc = pa->pa_pc;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;

	pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_AGP,
	    &sc->sc_capoff, NULL);

	printf(": aperture at 0x%lx, size 0x%lx\n", (u_long)sc->sc_apaddr,
	    (u_long)sc->sc_apsize);
}

struct cfattach agp_ca = {
	sizeof(struct agp_softc), agp_probe, agp_attach,
	NULL, NULL
};

struct cfdriver agp_cd = {
	NULL, "agp", DV_DULL
};

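/*
 * mmap(2) backend for the agp device node: translates an offset within
 * the aperture into a physical address, or fails if there is no aperture.
 */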
paddr_t
agpmmap(void *v, off_t off, int prot)
{
	struct agp_softc *sc = (struct agp_softc *)v;

	if (sc->sc_apaddr) {
		if (off > sc->sc_apsize)
			return (-1);

		/*
		 * XXX this should use bus_space_mmap() but it's not
		 * available on all archs.
		 */
		return (sc->sc_apaddr + off);
	}
	return (-1);
}

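/*
 * Only one consumer (typically the X server) may have the device open at
 * a time; further opens fail with EBUSY until it is closed.
 */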
int
agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (!sc->sc_opened)
		sc->sc_opened = 1;
	else
		return (EBUSY);

	return (0);
}

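/*
 * ioctl(2) dispatcher for the userland AGP API (sys/agpio.h).  Everything
 * except AGPIOC_INFO requires the descriptor to be open for writing.
 */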
int
agpioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *pb)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL)
		return (ENODEV);

	if (sc->sc_methods == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (cmd != AGPIOC_INFO && !(flag & FWRITE))
		return (EPERM);

	switch (cmd) {
	case AGPIOC_INFO:
		return (agp_info_user(sc, (agp_info *)addr));

	case AGPIOC_ACQUIRE:
		return (agp_acquire_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_RELEASE:
		return (agp_release_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_SETUP:
		return (agp_setup_user(sc, (agp_setup *)addr));

	case AGPIOC_ALLOCATE:
		return (agp_allocate_user(sc, (agp_allocate *)addr));

	case AGPIOC_DEALLOCATE:
		return (agp_deallocate_user(sc, *(int *)addr));

	case AGPIOC_BIND:
		return (agp_bind_user(sc, (agp_bind *)addr));

	case AGPIOC_UNBIND:
		return (agp_unbind_user(sc, (agp_unbind *)addr));

	default:
		return (ENOTTY);
	}
}

int
agpclose(dev_t dev, int flags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close.
	 */
	if (sc->sc_state == AGP_ACQUIRE_USER) {
		while ((mem = TAILQ_FIRST(&sc->sc_memory)) != 0) {
			if (mem->am_is_bound) {
				agp_unbind_memory(sc, mem);
			}
			agp_free_memory(sc, mem);
		}
		agp_release_helper(sc, AGP_ACQUIRE_USER);
	}
	sc->sc_opened = 0;

	return (0);
}

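/*
 * Look up an allocated memory block by the id handed out by the allocator;
 * returns NULL if no block on the softc's list matches.
 */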
struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return (mem);
	}
	return (0);
}

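/*
 * Allocate a Graphics Address Translation Table large enough to map an
 * aperture of the given size (one 32-bit entry per AGP page), backed by a
 * single segment of DMA-able memory that the chipset can read.
 */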
struct agp_gatt *
agp_alloc_gatt(bus_dma_tag_t dmat, u_int32_t apsize)
{
	struct agp_gatt		*gatt;
	u_int32_t		 entries = apsize >> AGP_PAGE_SHIFT;

	gatt = malloc(sizeof(*gatt), M_AGP, M_NOWAIT | M_ZERO);
	if (!gatt)
		return (NULL);
	gatt->ag_entries = entries;
	gatt->ag_size = entries * sizeof(u_int32_t);

	if (agp_alloc_dmamem(dmat, gatt->ag_size, &gatt->ag_dmamap,
	    &gatt->ag_physical, &gatt->ag_dmaseg) != 0) {
		free(gatt, M_AGP);
		return (NULL);
	}

	if (bus_dmamem_map(dmat, &gatt->ag_dmaseg, 1, gatt->ag_size,
	    (caddr_t *)&gatt->ag_virtual, BUS_DMA_NOWAIT) != 0) {
		agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap,
		    &gatt->ag_dmaseg);
		free(gatt, M_AGP);
		return (NULL);
	}

	agp_flush_cache();

	return (gatt);
}

void
agp_free_gatt(bus_dma_tag_t dmat, struct agp_gatt *gatt)
{
	bus_dmamem_unmap(dmat, (caddr_t)gatt->ag_virtual, gatt->ag_size);
	agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap, &gatt->ag_dmaseg);
	free(gatt, M_AGP);
}

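/*
 * Negotiate and program an AGP mode.  The mode actually enabled is the
 * strongest setting (request queue depth, side band addressing, fast
 * writes, data rate) supported by all three of: the requested mode, the
 * target (host bridge) and the master (video card).
 */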
int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	struct pci_attach_args	pa;
	pcireg_t		tstatus, mstatus, command;
	int			rq, sba, fw, rate, capoff;

	if (pci_find_device(&pa, agpvga_match) == 0 ||
	    pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("agp_generic_enable: not an AGP capable device\n");
		return (-1);
	}

	tstatus = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_STATUS);
	/* display agp mode */
	mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);

	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_COMMAND, command);
	pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);
	return (0);
}

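/*
 * Default allocator: only type 0 (regular) memory is supported.  This just
 * creates the DMA map and the bookkeeping entry; the backing pages are not
 * allocated until the block is bound.
 */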
struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
	struct agp_memory *mem;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n", type);
		return (0);
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK | M_ZERO);

	if (bus_dmamap_create(sc->sc_dmat, size, size / PAGE_SIZE + 1,
	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
		free(mem, M_AGP);
		return (NULL);
	}

	mem->am_id = sc->sc_nextid++;
	mem->am_size = size;
	TAILQ_INSERT_TAIL(&sc->sc_memory, mem, am_link);
	sc->sc_allocated += size;

	return (mem);
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	if (mem->am_is_bound)
		return (EBUSY);

	sc->sc_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->sc_memory, mem, am_link);
	bus_dmamap_destroy(sc->sc_dmat, mem->am_dmamap);
	free(mem, M_AGP);
	return (0);
}

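/*
 * Default bind: allocate the backing pages, load them into the DMA map and
 * enter every AGP page into the GATT at the requested aperture offset.
 * May sleep; sc_lock prevents re-entry.
 */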
int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
    bus_size_t offset)
{
	bus_dma_segment_t	*segs, *seg;
	bus_addr_t		 apaddr = sc->sc_apaddr + offset;
	bus_size_t		 done, i, j;
	int			 nseg, error;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound) {
		printf("AGP: memory already bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > sc->sc_apsize) {
		printf("AGP: binding memory at bad offset %#lx\n",
		    (unsigned long) offset);
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * The memory here needs to be directly accessible from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 */

	nseg = (mem->am_size + PAGE_SIZE - 1) / PAGE_SIZE;
	segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, mem->am_size, PAGE_SIZE, 0,
	    segs, nseg, &mem->am_nseg, BUS_DMA_ZERO | BUS_DMA_WAITOK)) != 0) {
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_alloc failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load_raw(sc->sc_dmat, mem->am_dmamap, segs,
	    mem->am_nseg, mem->am_size, BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamap_load failed %d\n", error);
		return (error);
	}
	mem->am_dmaseg = segs;

	/*
	 * Install entries in the GATT, making sure that if
	 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
	 * aligned to PAGE_SIZE, we don't modify too many GATT
	 * entries. Flush chipset tlb when done.
	 */
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)seg->ds_addr + j);
			sc->sc_methods->bind_page(sc->sc_chipc,
			    apaddr + done + j, seg->ds_addr + j, 0);
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

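/*
 * Default unbind: clear the block's GATT entries, flush caches and the
 * chipset TLB, then release the backing pages.
 */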
int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	bus_addr_t	apaddr = sc->sc_apaddr + mem->am_offset;
	bus_size_t	i;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound == 0) {
		printf("AGP: memory is not bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		sc->sc_methods->unbind_page(sc->sc_chipc, apaddr + i);

	agp_flush_cache();
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
	bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

/*
 * Allocates a single-segment block of zeroed, wired dma memory.
 */
int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t *mapp,
    bus_addr_t *baddr, bus_dma_segment_t *seg)
{
	int error, level = 0, nseg;

	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, 1, &nseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_create(tag, size, nseg, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_load_raw(tag, *mapp, seg, nseg, size,
	    BUS_DMA_NOWAIT)) != 0)
		goto out;

	*baddr = (*mapp)->dm_segs[0].ds_addr;

	return (0);
out:
	switch (level) {
	case 2:
		bus_dmamap_destroy(tag, *mapp);
		/* FALLTHROUGH */
	case 1:
		bus_dmamem_free(tag, seg, nseg);
		break;
	default:
		break;
	}

	return (error);
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    bus_dma_segment_t *seg)
{
	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_free(tag, seg, 1);
}

/* Helper functions used in both user and kernel APIs */

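/*
 * The GART may only be owned by one consumer at a time; these helpers move
 * it between AGP_ACQUIRE_FREE and the user- or kernel-owned states.
 * Releasing unbinds any memory still bound to the aperture.
 */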
int
agp_acquire_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_chipc == NULL)
		return (EINVAL);

	if (sc->sc_state != AGP_ACQUIRE_FREE)
		return (EBUSY);
	sc->sc_state = state;

	return (0);
}

int
agp_release_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;
	struct agp_memory *mem;

	if (sc->sc_state == AGP_ACQUIRE_FREE)
		return (0);

	if (sc->sc_state != state)
		return (EBUSY);

	/*
	 * Clear out the aperture and free any
	 * outstanding memory blocks.
	 */
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound) {
			printf("agp_release_helper: mem %d is bound\n",
			    mem->am_id);
			agp_unbind_memory(sc, mem);
		}
	}
	sc->sc_state = AGP_ACQUIRE_FREE;
	return (0);
}

/* Implementation of the userland ioctl API */

int
agp_info_user(void *dev, agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (!sc->sc_chipc)
		return (ENXIO);

	bzero(info, sizeof *info);
	info->bridge_id = sc->sc_id;
	if (sc->sc_capoff != 0)
		info->agp_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->agp_mode = 0; /* i810 doesn't have real AGP */
	info->aper_base = sc->sc_apaddr;
	info->aper_size = sc->sc_apsize >> 20;
	info->pg_total =
	info->pg_system = sc->sc_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->sc_allocated >> AGP_PAGE_SHIFT;

	return (0);
}

int
agp_setup_user(void *dev, agp_setup *setup)
{
	struct agp_softc	*sc = dev;

	return (agp_enable(sc, setup->agp_mode));
}

int
agp_allocate_user(void *dev, agp_allocate *alloc)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;
	size_t			 size = alloc->pg_count << AGP_PAGE_SHIFT;

	if (sc->sc_allocated + size > sc->sc_maxmem)
		return (EINVAL);

	mem = agp_alloc_memory(sc, alloc->type, size);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return (0);
	} else
		return (ENOMEM);
}

int
agp_deallocate_user(void *dev, int id)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, id)) != NULL) {
		agp_free_memory(sc, mem);
		return (0);
	} else
		return (ENOENT);
}

int
agp_bind_user(void *dev, agp_bind *bind)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, bind->key)) == NULL)
		return (ENOENT);
	return (agp_bind_memory(sc, mem, bind->pg_start << AGP_PAGE_SHIFT));
}

int
agp_unbind_user(void *dev, agp_unbind *unbind)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, unbind->key)) == NULL)
		return (ENOENT);

	return (agp_unbind_memory(sc, mem));
}

/* Implementation of the kernel API */

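/*
 * A kernel consumer typically acquires the GART, enables a mode and then
 * allocates and binds memory, roughly as follows (an illustrative sketch
 * only; mode, size and offset are placeholders, not taken from a specific
 * driver):
 *
 *	void *agpdev, *handle;
 *
 *	agpdev = agp_find_device(0);
 *	if (agpdev == NULL || agp_acquire(agpdev) != 0)
 *		return;
 *	agp_enable(agpdev, mode);
 *	handle = agp_alloc_memory(agpdev, 0, size);
 *	if (handle != NULL)
 *		agp_bind_memory(agpdev, handle, offset);
 *	...
 *	agp_unbind_memory(agpdev, handle);
 *	agp_free_memory(agpdev, handle);
 *	agp_release(agpdev);
 */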
void *
agp_find_device(int unit)
{
	if (unit >= agp_cd.cd_ndevs || unit < 0)
		return (NULL);
	return (agp_cd.cd_devs[unit]);
}

enum agp_acquire_state
agp_state(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (sc->sc_state);
}

void
agp_get_info(void *dev, struct agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_capoff != 0)
		info->ai_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->ai_mode = 0; /* i810 doesn't have real AGP */
	info->ai_aperture_base = sc->sc_apaddr;
	info->ai_aperture_size = sc->sc_apsize;
	info->ai_memory_allowed = sc->sc_maxmem;
	info->ai_memory_used = sc->sc_allocated;
}

int
agp_acquire(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_acquire_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_release(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_release_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_enable(void *dev, u_int32_t mode)
{
	struct agp_softc	*sc = dev;
	int			 ret;

	if (sc->sc_methods->enable != NULL) {
		ret = sc->sc_methods->enable(sc->sc_chipc, mode);
	} else {
		ret = agp_generic_enable(sc, mode);
	}
	return (ret);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if (sc->sc_methods->alloc_memory != NULL) {
		mem = sc->sc_methods->alloc_memory(sc->sc_chipc, type, bytes);
	} else {
		mem = agp_generic_alloc_memory(sc, type, bytes);
	}
	return (mem);
}

void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = handle;

	if (sc->sc_methods->free_memory != NULL) {
		sc->sc_methods->free_memory(sc->sc_chipc, mem);
	} else {
		agp_generic_free_memory(sc, mem);
	}
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->bind_memory != NULL) {
		ret = sc->sc_methods->bind_memory(sc->sc_chipc, mem, offset);
	} else {
		ret = agp_generic_bind_memory(sc, mem, offset);
	}
	return (ret);
}

int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->unbind_memory != NULL) {
		ret = sc->sc_methods->unbind_memory(sc->sc_chipc, mem);
	} else {
		ret = agp_generic_unbind_memory(sc, mem);
	}
	return (ret);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *)handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
904