xref: /openbsd-src/sys/dev/pci/agp.c (revision 850e275390052b330d93020bf619a739a3c277ac)
1 /* $OpenBSD: agp.c,v 1.26 2008/09/26 21:15:53 mikeb Exp $ */
2 /*-
3  * Copyright (c) 2000 Doug Rabson
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
28  */
29 
30 #include <sys/param.h>
31 #include <sys/malloc.h>
32 #include <sys/agpio.h>
33 #include <sys/fcntl.h>
34 #include <sys/ioctl.h>
35 
36 #include <uvm/uvm.h>
37 
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <dev/ic/mc6845reg.h>
42 #include <dev/ic/pcdisplayvar.h>
43 #include <dev/ic/vgareg.h>
44 #include <dev/ic/vgavar.h>
45 
46 #include <dev/pci/agpvar.h>
47 #include <dev/pci/agpreg.h>
48 
49 #include "agp_ali.h"
50 #include "agp_amd.h"
51 #include "agp_amd64.h"
52 #include "agp_apple.h"
53 #include "agp_i810.h"
54 #include "agp_intel.h"
55 #include "agp_sis.h"
56 #include "agp_via.h"
57 
58 struct agp_memory *agp_find_memory(struct agp_softc *sc, int id);
59 const struct agp_product *agp_lookup(struct pci_attach_args *pa);
60 /* userland ioctl functions */
61 int	agp_info_user(void *, agp_info *);
62 int	agp_setup_user(void *, agp_setup *);
63 int	agp_allocate_user(void *, agp_allocate *);
64 int	agp_deallocate_user(void *, int);
65 int	agp_bind_user(void *, agp_bind *);
66 int	agp_unbind_user(void *, agp_unbind *);
67 int	agp_acquire_helper(void *dev, enum agp_acquire_state state);
68 int	agp_release_helper(void *dev, enum agp_acquire_state state);
69 
/*
 * Table of supported AGP bridges.  Matched by PCI vendor; the i810
 * family additionally matches the exact product id, everything else
 * uses ap_product == -1 as a per-vendor wildcard.  agp_lookup()
 * requires all entries for a given vendor to be contiguous, and the
 * table is terminated by an all-NULL sentinel.
 */
const struct agp_product agp_products[] = {
#if NAGP_ALI > 0
	{ PCI_VENDOR_ALI, -1, agp_ali_attach },
#endif
#if NAGP_AMD > 0
	{ PCI_VENDOR_AMD, -1, agp_amd_attach },
#endif
#if NAGP_I810 > 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_DC100_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830M_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865G_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945G_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G965_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q965_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM965_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_HB, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_HB, agp_i810_attach },
#endif
#if NAGP_INTEL > 0
	{ PCI_VENDOR_INTEL, -1, agp_intel_attach },
#endif
#if NAGP_SIS > 0
	{ PCI_VENDOR_SIS, -1, agp_sis_attach },
#endif
#if NAGP_VIA > 0
	{ PCI_VENDOR_VIATECH, -1, agp_via_attach },
#endif
	{ 0, 0, NULL }	/* sentinel */
};
109 
110 
111 int
112 agp_probe(struct device *parent, void *match, void *aux)
113 {
114 	struct agpbus_attach_args *aaa = aux;
115 	struct pci_attach_args *pa = &aaa->apa_pci_args;
116 
117 	/* pci_args must be a pchb */
118 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE ||
119 	    PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_HOST)
120 		return (0);
121 
122 	if (agp_lookup(pa) == NULL)
123 		return (0);
124 
125 	return (1);
126 }
127 
/*
 * Attach an AGP bridge: compute an upper bound on AGP memory
 * allocation, record the PCI attachment parameters in the softc
 * and call the chipset-specific attach routine selected by
 * agp_lookup().
 */
void
agp_attach(struct device *parent, struct device *self, void *aux)
{
	struct agpbus_attach_args *aaa = aux;
	struct pci_attach_args *pa = &aaa->apa_pci_args;
	struct agp_softc *sc = (struct agp_softc *)self;
	const struct agp_product *ap;
	u_int memsize;
	int i, ret;

	ap = agp_lookup(pa);
	if (ap) {
		/* {system RAM (MB), max AGP allocation (MB)} pairs. */
		static const int agp_max[][2] = {
			{0,		0},
			{32,		4},
			{64,		28},
			{128,		96},
			{256,		204},
			{512,		440},
			{1024,		942},
			{2048,		1920},
			{4096,		3932}
		};
#define	agp_max_size	 (sizeof(agp_max)/sizeof(agp_max[0]))

		/*
		 * Work out an upper bound for agp memory allocation. This
		 * uses a heuristic table from the Linux driver.
		 */
		memsize = ptoa(physmem) >> 20;	/* physical RAM in MB */

		/* Find the first row whose RAM bound covers memsize. */
		for (i = 0; i < agp_max_size && memsize > agp_max[i][0]; i++)
			;
		if (i == agp_max_size)
			i = agp_max_size - 1;	/* clamp to the largest row */
		sc->sc_maxmem = agp_max[i][1] << 20;	/* MB -> bytes */

		/*
		 * The lock is used to prevent re-entry to
		 * agp_generic_bind_memory() since that function can sleep.
		 */

		rw_init(&sc->sc_lock, "agplk");

		TAILQ_INIT(&sc->sc_memory);

		/* Cache PCI handles for later config-space access. */
		sc->sc_pcitag = pa->pa_tag;
		sc->sc_pc = pa->pa_pc;
		sc->sc_id = pa->pa_id;
		sc->sc_dmat = pa->pa_dmat;
		sc->sc_memt = pa->pa_memt;
		sc->sc_vgapcitag = aaa->apa_vga_args.pa_tag;
		sc->sc_vgapc = aaa->apa_vga_args.pa_pc;

		/* sc_capoff stays 0 when there is no AGP capability. */
		pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_AGP,
		    &sc->sc_capoff, NULL);

		sc->vga_softc = (struct vga_pci_softc *)parent;

		printf(": ");
		ret = (*ap->ap_attach)(sc, pa);
		if (ret == 0)
			printf("aperture at 0x%lx, size 0x%lx\n",
			    (u_long)sc->sc_apaddr,
			    (u_long)AGP_GET_APERTURE(sc));
		else {
			/* Chipset attach failed; mark the device unusable. */
			sc->sc_chipc = NULL;
		}
	}
}
198 
199 struct cfattach agp_ca = {
200         sizeof (struct agp_softc), agp_probe, agp_attach,
201 	NULL, NULL
202 };
203 
/* Driver class glue; agp_find_device() indexes cd_devs by unit. */
struct cfdriver agp_cd = {
	NULL, "agp", DV_DULL
};
207 
208 paddr_t
209 agpmmap(void *v, off_t off, int prot)
210 {
211 	struct agp_softc* sc = (struct agp_softc *)v;
212 
213 	if (sc->sc_apaddr) {
214 
215 		if (off > AGP_GET_APERTURE(sc))
216 			return (-1);
217 
218 		/*
219 		 * XXX this should use bus_space_mmap() but it's not
220 		 * availiable on all archs.
221 		 */
222 		return atop(sc->sc_apaddr + off);
223 	}
224 	return (-1);
225 }
226 
227 int
228 agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
229 {
230         struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
231 
232         if (sc == NULL)
233                 return (ENXIO);
234 
235         if (sc->sc_chipc == NULL)
236                 return (ENXIO);
237 
238         if (!sc->sc_opened)
239                 sc->sc_opened = 1;
240         else
241                 return (EBUSY);
242 
243         return (0);
244 }
245 
246 
247 int
248 agpioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *pb)
249 {
250 	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
251 
252 	if (sc ==NULL)
253 		return (ENODEV);
254 
255 	if (sc->sc_methods == NULL || sc->sc_chipc == NULL)
256 		return (ENXIO);
257 
258 	if (cmd != AGPIOC_INFO && !(flag & FWRITE))
259 		return (EPERM);
260 
261 	switch(cmd) {
262 	case AGPIOC_INFO:
263 		return (agp_info_user(sc, (agp_info *)addr));
264 
265 	case AGPIOC_ACQUIRE:
266 		return (agp_acquire_helper(sc, AGP_ACQUIRE_USER));
267 
268 	case AGPIOC_RELEASE:
269 		return (agp_release_helper(sc, AGP_ACQUIRE_USER));
270 
271 	case AGPIOC_SETUP:
272 		return (agp_setup_user(sc, (agp_setup *)addr));
273 
274 	case AGPIOC_ALLOCATE:
275 		return (agp_allocate_user(sc, (agp_allocate *)addr));
276 
277 	case AGPIOC_DEALLOCATE:
278 		return (agp_deallocate_user(sc, *(int *)addr));
279 
280 	case AGPIOC_BIND:
281 		return (agp_bind_user(sc, (agp_bind *)addr));
282 
283 	case AGPIOC_UNBIND:
284 		return (agp_unbind_user(sc, (agp_unbind *)addr));
285 
286 	default:
287 		return (ENOTTY);
288 	}
289 
290 }
291 
292 int
293 agpclose(dev_t dev, int flags, int devtype, struct proc *p)
294 {
295 	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
296 	struct agp_memory *mem;
297 
298 	/*
299          * Clear the GATT and force release on last close
300          */
301 	if (sc->sc_state == AGP_ACQUIRE_USER) {
302 		while ((mem = TAILQ_FIRST(&sc->sc_memory)) != 0) {
303 			if (mem->am_is_bound)
304 				AGP_UNBIND_MEMORY(sc, mem);
305 			AGP_FREE_MEMORY(sc, mem);
306 		}
307                 agp_release_helper(sc, AGP_ACQUIRE_USER);
308 	}
309         sc->sc_opened = 0;
310 
311 	return (0);
312 }
313 
314 struct agp_memory *
315 agp_find_memory(struct agp_softc *sc, int id)
316 {
317 	struct agp_memory *mem;
318 
319 	AGP_DPF("searching for memory block %d\n", id);
320 	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
321 		AGP_DPF("considering memory block %d\n", mem->am_id);
322 		if (mem->am_id == id)
323 			return (mem);
324 	}
325 	return (0);
326 }
327 
328 const struct agp_product *
329 agp_lookup(struct pci_attach_args *pa)
330 {
331 	const struct agp_product *ap;
332 
333 	/* First find the vendor. */
334 	for (ap = agp_products; ap->ap_attach != NULL; ap++)
335 		if (ap->ap_vendor == PCI_VENDOR(pa->pa_id))
336 			break;
337 
338 	if (ap->ap_attach == NULL)
339 		return (NULL);
340 
341 	/* Now find the product within the vendor's domain. */
342 	for (; ap->ap_attach != NULL; ap++) {
343 		/* Ran out of this vendor's section of the table. */
344 		if (ap->ap_vendor != PCI_VENDOR(pa->pa_id))
345 			return (NULL);
346 
347 		if (ap->ap_product == PCI_PRODUCT(pa->pa_id))
348 			break;		/* Exact match. */
349 		if (ap->ap_product == (u_int32_t) -1)
350 			break;		/* Wildcard match. */
351 	}
352 
353 	if (ap->ap_attach == NULL)
354 		ap = NULL;
355 
356 	return (ap);
357 }
358 
359 int
360 agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc, u_int32_t bar, u_int32_t memtype)
361 {
362 	/* Find the aperture. Don't map it (yet), this would eat KVA */
363 	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar,
364 	    memtype, &sc->sc_apaddr, &sc->sc_apsize,
365 	    &sc->sc_apflags) != 0)
366 		return (ENXIO);
367 
368 	return (0);
369 }
370 
371 u_int32_t
372 agp_generic_get_aperture(struct agp_softc *sc)
373 {
374 	return (sc->sc_apsize);
375 }
376 
377 int
378 agp_generic_set_aperture(struct agp_softc *sc, u_int32_t aperture)
379 {
380 	if (aperture != AGP_GET_APERTURE(sc))
381 		return (EINVAL);
382 
383 	return (0);
384 }
385 
386 struct agp_gatt *
387 agp_alloc_gatt(struct agp_softc *sc)
388 {
389 	u_int32_t apsize = AGP_GET_APERTURE(sc);
390 	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
391 	struct agp_gatt *gatt;
392 	int nseg;
393 
394 	gatt = malloc(sizeof(*gatt), M_AGP, M_NOWAIT | M_ZERO);
395 	if (!gatt)
396 		return (NULL);
397 	gatt->ag_entries = entries;
398 
399 	if (agp_alloc_dmamem(sc->sc_dmat, entries * sizeof(u_int32_t),
400 	    0, &gatt->ag_dmamap, (caddr_t *)&gatt->ag_virtual,
401 	    &gatt->ag_physical, &gatt->ag_dmaseg, 1, &nseg) != 0)
402 		return (NULL);
403 
404 	gatt->ag_size = entries * sizeof(u_int32_t);
405 	memset(gatt->ag_virtual, 0, gatt->ag_size);
406 	agp_flush_cache();
407 
408 	return (gatt);
409 }
410 
411 void
412 agp_free_gatt(struct agp_softc *sc, struct agp_gatt *gatt)
413 {
414 	agp_free_dmamem(sc->sc_dmat, gatt->ag_size, gatt->ag_dmamap,
415 	    (caddr_t)gatt->ag_virtual, &gatt->ag_dmaseg, 1);
416 	free(gatt, M_AGP);
417 }
418 
/* Generic detach: nothing chipset-specific to undo, just flush caches. */
int
agp_generic_detach(struct agp_softc *sc)
{
	agp_flush_cache();

	return (0);
}
425 
/*
 * Generic AGP mode negotiation: intersect the capabilities
 * advertised by the bridge (target), the graphics device (master)
 * and the caller's requested mode, then program the resulting
 * command word into both devices.  Returns 0 on success, -1 when
 * the VGA device carries no AGP capability.
 */
int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	pcireg_t tstatus, mstatus;
	pcireg_t command;
	int rq, sba, fw, rate, capoff;

	if (pci_get_capability(sc->sc_vgapc, sc->sc_vgapcitag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("agp_generic_enable: not an AGP capable device\n");
		return (-1);
	}

	/* bridge (target) status register */
	tstatus = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_STATUS);
	/* display agp mode */
	mstatus = pci_conf_read(sc->sc_vgapc, sc->sc_vgapcitag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	/* Pick the fastest rate all parties support. */
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware  */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);

	/* Write the negotiated command word to both ends of the link. */
	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_COMMAND, command);
	pci_conf_write(sc->sc_vgapc, sc->sc_vgapcitag, capoff + AGP_COMMAND,
	    command);
	return (0);
}
486 
487 struct agp_memory *
488 agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
489 {
490 	struct agp_memory *mem;
491 
492 	if (type != 0) {
493 		printf("agp_generic_alloc_memory: unsupported type %d\n", type);
494 		return (0);
495 	}
496 
497 	mem = malloc(sizeof *mem, M_AGP, M_WAITOK | M_ZERO);
498 
499 	if (bus_dmamap_create(sc->sc_dmat, size, size / PAGE_SIZE + 1,
500 	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
501 		free(mem, M_AGP);
502 		return (NULL);
503 	}
504 
505 	mem->am_id = sc->sc_nextid++;
506 	mem->am_size = size;
507 	TAILQ_INSERT_TAIL(&sc->sc_memory, mem, am_link);
508 	sc->sc_allocated += size;
509 
510 	return (mem);
511 }
512 
513 int
514 agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
515 {
516 	if (mem->am_is_bound)
517 		return (EBUSY);
518 
519 	sc->sc_allocated -= mem->am_size;
520 	TAILQ_REMOVE(&sc->sc_memory, mem, am_link);
521 	bus_dmamap_destroy(sc->sc_dmat, mem->am_dmamap);
522 	free(mem, M_AGP);
523 	return (0);
524 }
525 
/*
 * Bind an agp_memory block into the aperture at `offset': allocate
 * DMA-safe backing pages, wire each AGP-sized page into the GATT
 * and flush CPU caches and the chipset TLB so the new translations
 * take effect.  Serialized by sc_lock because the bus_dma calls can
 * sleep.  On any failure everything done so far is unwound and an
 * errno is returned.
 */
int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
			off_t offset)
{
	bus_dma_segment_t *segs, *seg;
	bus_size_t done, j;
	bus_addr_t pa;
	off_t i, k;
	int nseg, error;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound) {
		printf("AGP: memory already bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/* Offset must be AGP-page aligned and fit inside the aperture. */
	if (offset < 0
	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(sc)) {
		printf("AGP: binding memory at bad offset %#lx\n",
		    (unsigned long) offset);
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * The memory here needs to be directly accessable from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 */

	nseg = (mem->am_size + PAGE_SIZE - 1) / PAGE_SIZE;
	segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, mem->am_size, PAGE_SIZE, 0,
	    segs, nseg, &mem->am_nseg, BUS_DMA_WAITOK)) != 0) {
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_alloc failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, segs, mem->am_nseg,
	    mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_map failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, mem->am_dmamap,
	    mem->am_virtual, mem->am_size, NULL,
	    BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual,
		    mem->am_size);
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamap_load failed %d\n", error);
		return (error);
	}
	mem->am_dmaseg = segs;

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			pa = seg->ds_addr + j;
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)pa);
			error = AGP_BIND_PAGE(sc, offset + done + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(sc, offset + k);

				bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
				bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual,
				    mem->am_size);
				bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg,
				    mem->am_nseg);
				free(mem->am_dmaseg, M_AGP);
				rw_exit_write(&sc->sc_lock);
				AGP_DPF("AGP_BIND_PAGE failed %d\n", error);
				return (error);
			}
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(sc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	rw_exit_write(&sc->sc_lock);

	return (0);
}
650 
/*
 * Undo agp_generic_bind_memory(): clear the GATT entries, flush
 * caches and the chipset TLB, then release the DMA backing pages.
 * Serialized by sc_lock.
 */
int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	int i;

	rw_enter_write(&sc->sc_lock);

	if (!mem->am_is_bound) {
		printf("AGP: memory is not bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}


	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(sc, mem->am_offset + i);

	agp_flush_cache();
	AGP_FLUSH_TLB(sc);

	/* Release the DMA resources acquired at bind time. */
	bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual, mem->am_size);
	bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	rw_exit_write(&sc->sc_lock);

	return (0);
}
688 
689 int
690 agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
691     bus_dmamap_t *mapp, caddr_t *vaddr, bus_addr_t *baddr,
692     bus_dma_segment_t *seg, int nseg, int *rseg)
693 
694 {
695 	int error, level = 0;
696 
697 	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
698 	    seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0)
699 		goto out;
700 	level++;
701 
702 	if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr,
703 	    BUS_DMA_NOWAIT | flags)) != 0)
704 		goto out;
705 	level++;
706 
707 	if ((error = bus_dmamap_create(tag, size, *rseg, size, 0,
708 	    BUS_DMA_NOWAIT, mapp)) != 0)
709 		goto out;
710 	level++;
711 
712 	if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
713 	    BUS_DMA_NOWAIT)) != 0)
714 		goto out;
715 
716 	*baddr = (*mapp)->dm_segs[0].ds_addr;
717 
718 	return (0);
719 out:
720 	switch (level) {
721 	case 3:
722 		bus_dmamap_destroy(tag, *mapp);
723 		/* FALLTHROUGH */
724 	case 2:
725 		bus_dmamem_unmap(tag, *vaddr, size);
726 		/* FALLTHROUGH */
727 	case 1:
728 		bus_dmamem_free(tag, seg, *rseg);
729 		break;
730 	default:
731 		break;
732 	}
733 
734 	return (error);
735 }
736 
737 void
738 agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
739     caddr_t vaddr, bus_dma_segment_t *seg, int nseg)
740 {
741 
742 	bus_dmamap_unload(tag, map);
743 	bus_dmamap_destroy(tag, map);
744 	bus_dmamem_unmap(tag, vaddr, size);
745 	bus_dmamem_free(tag, seg, nseg);
746 }
747 
748 /* Helper functions used in both user and kernel APIs */
749 
750 int
751 agp_acquire_helper(void *dev, enum agp_acquire_state state)
752 {
753 	struct agp_softc *sc = (struct agp_softc *)dev;
754 
755 	if (sc->sc_chipc == NULL)
756 		return (EINVAL);
757 
758 	if (sc->sc_state != AGP_ACQUIRE_FREE)
759 		return (EBUSY);
760 	sc->sc_state = state;
761 
762 	return (0);
763 }
764 
765 int
766 agp_release_helper(void *dev, enum agp_acquire_state state)
767 {
768 	struct agp_softc *sc = (struct agp_softc *)dev;
769 	struct agp_memory* mem;
770 
771 	if (sc->sc_state == AGP_ACQUIRE_FREE)
772 		return (0);
773 
774 	if (sc->sc_state != state)
775 		return (EBUSY);
776 
777 	/*
778 	 * Clear out the aperture and free any
779 	 * outstanding memory blocks.
780 	 */
781 	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
782 		if (mem->am_is_bound) {
783 			printf("agp_release_helper: mem %d is bound\n",
784 			    mem->am_id);
785 			AGP_UNBIND_MEMORY(sc, mem);
786 		}
787 	}
788 	sc->sc_state = AGP_ACQUIRE_FREE;
789 	return (0);
790 }
791 
792 /* Implementation of the userland ioctl API */
793 
794 int
795 agp_info_user(void *dev, agp_info *info)
796 {
797 	struct agp_softc *sc = (struct agp_softc *) dev;
798 
799 	if (!sc->sc_chipc)
800 		return (ENXIO);
801 
802 	bzero(info, sizeof *info);
803 	info->bridge_id = sc->sc_id;
804 	if (sc->sc_capoff != 0)
805 		info->agp_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
806 		    AGP_STATUS + sc->sc_capoff);
807 	else
808 		info->agp_mode = 0; /* i810 doesn't have real AGP */
809 	info->aper_base = sc->sc_apaddr;
810 	info->aper_size = AGP_GET_APERTURE(sc) >> 20;
811 	info->pg_total =
812 	info->pg_system = sc->sc_maxmem >> AGP_PAGE_SHIFT;
813 	info->pg_used = sc->sc_allocated >> AGP_PAGE_SHIFT;
814 
815 	return (0);
816 }
817 
818 int
819 agp_setup_user(void *dev, agp_setup *setup)
820 {
821 	struct agp_softc *sc = (struct agp_softc *) dev;
822 
823 	return (AGP_ENABLE(sc, setup->agp_mode));
824 }
825 
826 int
827 agp_allocate_user(void *dev, agp_allocate *alloc)
828 {
829 	struct agp_softc *sc = (struct agp_softc *) dev;
830 	struct agp_memory* mem;
831 	size_t size = alloc->pg_count << AGP_PAGE_SHIFT;
832 
833 	if (sc->sc_allocated + size > sc->sc_maxmem)
834 		return (EINVAL);
835 
836 	mem = AGP_ALLOC_MEMORY(sc, alloc->type, size);
837 	if (mem) {
838 		alloc->key = mem->am_id;
839 		alloc->physical = mem->am_physical;
840 		return (0);
841 	} else
842 		return (ENOMEM);
843 }
844 
845 int
846 agp_deallocate_user(void *dev, int id)
847 {
848 	struct agp_softc *sc = (struct agp_softc *) dev;
849 	struct agp_memory *mem = agp_find_memory(sc, id);
850 	if (mem) {
851 		AGP_FREE_MEMORY(sc, mem);
852 		return (0);
853 	} else
854 		return (ENOENT);
855 }
856 
857 int
858 agp_bind_user(void *dev, agp_bind *bind)
859 {
860 	struct agp_softc *sc = (struct agp_softc *) dev;
861 	struct agp_memory *mem = agp_find_memory(sc, bind->key);
862 
863 	if (!mem)
864 		return (ENOENT);
865 
866 	return (AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT));
867 }
868 
869 
870 int
871 agp_unbind_user(void *dev, agp_unbind *unbind)
872 {
873 	struct agp_softc *sc = (struct agp_softc *) dev;
874 	struct agp_memory *mem = agp_find_memory(sc, unbind->key);
875 
876 	if (!mem)
877 		return (ENOENT);
878 
879 	return (AGP_UNBIND_MEMORY(sc, mem));
880 }
881 
882 /* Implementation of the kernel api */
883 
884 void *
885 agp_find_device(int unit)
886 {
887 	if (unit >= agp_cd.cd_ndevs || unit < 0)
888 		return (NULL);
889 	return (agp_cd.cd_devs[unit]);
890 }
891 
892 enum agp_acquire_state
893 agp_state(void *dev)
894 {
895 	struct agp_softc *sc = (struct agp_softc *) dev;
896         return (sc->sc_state);
897 }
898 
899 void
900 agp_get_info(void *dev, struct agp_info *info)
901 {
902 	struct agp_softc *sc = (struct agp_softc *)dev;
903 
904         info->ai_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
905 	    sc->sc_capoff + AGP_STATUS);
906 	info->ai_aperture_base = sc->sc_apaddr;
907 	info->ai_aperture_size = sc->sc_apsize;
908         info->ai_memory_allowed = sc->sc_maxmem;
909         info->ai_memory_used = sc->sc_allocated;
910 }
911 
912 int
913 agp_acquire(void *dev)
914 {
915 	struct agp_softc *sc = (struct agp_softc *)dev;
916 
917         return (agp_acquire_helper(sc, AGP_ACQUIRE_KERNEL));
918 }
919 
920 int
921 agp_release(void *dev)
922 {
923 	struct agp_softc *sc = (struct agp_softc *)dev;
924 
925         return (agp_release_helper(sc, AGP_ACQUIRE_KERNEL));
926 }
927 
928 int
929 agp_enable(void *dev, u_int32_t mode)
930 {
931 	struct agp_softc *sc = (struct agp_softc *) dev;
932 
933         return (AGP_ENABLE(sc, mode));
934 }
935 
936 void *
937 agp_alloc_memory(void *dev, int type, vsize_t bytes)
938 {
939 	struct agp_softc *sc = (struct agp_softc *)dev;
940 
941         return  ((void *) AGP_ALLOC_MEMORY(sc, type, bytes));
942 }
943 
/* Kernel API: free a block previously returned by agp_alloc_memory(). */
void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = handle;

	AGP_FREE_MEMORY(sc, mem);
}
952 
953 int
954 agp_bind_memory(void *dev, void *handle, off_t offset)
955 {
956 	struct agp_softc *sc = (struct agp_softc *) dev;
957 	struct agp_memory *mem = (struct agp_memory *) handle;
958 
959 	return (AGP_BIND_MEMORY(sc, mem, offset));
960 }
961 
/* Kernel API: unbind a bound block from the aperture. */
int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = handle;

	return (AGP_UNBIND_MEMORY(sc, mem));
}
970 
971 void
972 agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
973 {
974         struct agp_memory *mem = (struct agp_memory *) handle;
975 
976         mi->ami_size = mem->am_size;
977         mi->ami_physical = mem->am_physical;
978         mi->ami_offset = mem->am_offset;
979         mi->ami_is_bound = mem->am_is_bound;
980 }
981