/* $OpenBSD: agp.c,v 1.27 2008/11/09 15:11:19 oga Exp $ */
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/agpio.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>

#include <uvm/uvm.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>

/*
 * The enable and {alloc, free, bind, unbind} memory routines have default
 * fallbacks; these macros call the chipset-specific hook when one is
 * provided and fall back to the generic implementation otherwise. The
 * callbacks with no fallback are called directly. This is mostly a
 * workaround for the weirdness of Intel integrated graphics, which are not
 * technically a true AGP chipset but provide an almost identical interface.
 */
#define AGP_ENABLE(sc, m) ((sc->sc_methods->enable != NULL) ?		\
	sc->sc_methods->enable(sc->sc_chipc, m) :			\
	agp_generic_enable(sc, m))

#define AGP_ALLOC_MEMORY(sc, t, s) ((sc->sc_methods->alloc_memory != NULL) ? \
	sc->sc_methods->alloc_memory(sc->sc_chipc, t, s) :		\
	agp_generic_alloc_memory(sc, t, s))

#define AGP_FREE_MEMORY(sc, m) ((sc->sc_methods->free_memory != NULL) ?	\
	sc->sc_methods->free_memory(sc->sc_chipc, m) :			\
	agp_generic_free_memory(sc, m))

#define AGP_BIND_MEMORY(sc, m, o) ((sc->sc_methods->bind_memory != NULL) ? \
	sc->sc_methods->bind_memory(sc->sc_chipc, m, o) :		\
	agp_generic_bind_memory(sc, m, o))

#define AGP_UNBIND_MEMORY(sc, m) ((sc->sc_methods->unbind_memory != NULL) ? \
	sc->sc_methods->unbind_memory(sc->sc_chipc, m) :		\
	agp_generic_unbind_memory(sc, m))

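/*
 * Illustrative sketch (not part of the driver): a chipset backend hands
 * agp(4) a struct agp_methods and only fills in the hooks it wants to
 * override.  With a hypothetical table like the one below,
 * AGP_BIND_MEMORY() would call foo_bind_memory() while AGP_ALLOC_MEMORY()
 * would fall back to agp_generic_alloc_memory():
 *
 *	static const struct agp_methods foo_methods = {
 *		.get_aperture	= foo_get_aperture,
 *		.bind_page	= foo_bind_page,
 *		.unbind_page	= foo_unbind_page,
 *		.flush_tlb	= foo_flush_tlb,
 *		.bind_memory	= foo_bind_memory,
 *	};
 */
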
int	agp_generic_enable(struct agp_softc *, u_int32_t);
struct agp_memory *
	agp_generic_alloc_memory(struct agp_softc *, int, vsize_t size);
int	agp_generic_free_memory(struct agp_softc *, struct agp_memory *);
void	agp_attach(struct device *, struct device *, void *);
int	agp_probe(struct device *, void *, void *);
int	agpbusprint(void *, const char *);
paddr_t	agpmmap(void *, off_t, int);
int	agpioctl(dev_t, u_long, caddr_t, int, struct proc *);
int	agpopen(dev_t, int, int, struct proc *);
int	agpclose(dev_t, int, int, struct proc *);

struct agp_memory *agp_find_memory(struct agp_softc *sc, int id);
/* userland ioctl functions */
int	agpvga_match(struct pci_attach_args *);
int	agp_info_user(void *, agp_info *);
int	agp_setup_user(void *, agp_setup *);
int	agp_allocate_user(void *, agp_allocate *);
int	agp_deallocate_user(void *, int);
int	agp_bind_user(void *, agp_bind *);
int	agp_unbind_user(void *, agp_unbind *);
int	agp_acquire_helper(void *dev, enum agp_acquire_state state);
int	agp_release_helper(void *dev, enum agp_acquire_state state);

int
agpdev_print(void *aux, const char *pnp)
{
	if (pnp) {
		printf("agp at %s", pnp);
	}
	return (UNCONF);
}

int
agpbus_probe(struct agp_attach_args *aa)
{
	struct pci_attach_args	*pa = aa->aa_pa;

	if (strncmp(aa->aa_busname, "agp", 3) == 0 &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
		return (1);
	return (0);
}

/*
 * Find the video card hanging off the agp bus. XXX assumes only one bus.
 */
int
agpvga_match(struct pci_attach_args *pa)
{
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA) {
		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
		    NULL, NULL))
			return (1);
	}
	return (0);
}

struct device *
agp_attach_bus(struct pci_attach_args *pa, const struct agp_methods *methods,
    int bar, pcireg_t type, struct device *dev)
{
	struct agpbus_attach_args arg;

	arg.aa_methods = methods;
	arg.aa_pa = pa;
	arg.aa_bar = bar;
	arg.aa_type = type;

	printf("\n"); /* newline from the driver that called us */
	return (config_found(dev, &arg, agpdev_print));
}
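
/*
 * Illustrative sketch (not part of the driver): a chipset driver's attach
 * routine typically finishes by handing the device off to agp(4) through
 * agp_attach_bus().  The foo_* names are hypothetical and the BAR/type
 * values below are only examples:
 *
 *	printf(": foo chipset");
 *	(void)agp_attach_bus(pa, &foo_methods, AGP_APBASE,
 *	    PCI_MAPREG_TYPE_MEM, self);
 */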

int
agp_probe(struct device *parent, void *match, void *aux)
{
	/*
	 * We don't do any checking here; the driver we're attaching this
	 * interface to should have already done it.
	 */
	return (1);
}

void
agp_attach(struct device *parent, struct device *self, void *aux)
{
	struct agpbus_attach_args *aa = aux;
	struct pci_attach_args *pa = aa->aa_pa;
	struct agp_softc *sc = (struct agp_softc *)self;
	u_int memsize;
	int i;

	sc->sc_chipc = parent;
	sc->sc_methods = aa->aa_methods;

	static const int agp_max[][2] = {
		{0,		0},
		{32,		4},
		{64,		28},
		{128,		96},
		{256,		204},
		{512,		440},
		{1024,		942},
		{2048,		1920},
		{4096,		3932}
	};
#define	agp_max_size	(sizeof(agp_max)/sizeof(agp_max[0]))

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(physmem) >> 20;

	for (i = 0; i < agp_max_size && memsize > agp_max[i][0]; i++)
		;
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->sc_maxmem = agp_max[i][1] << 20;
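
	/*
	 * Worked example of the table above: with 512MB of physical memory
	 * (memsize == 512) the loop stops at the {512, 440} row, so at most
	 * 440MB of AGP memory will ever be handed out.
	 */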

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	rw_init(&sc->sc_lock, "agplk");

	TAILQ_INIT(&sc->sc_memory);

	sc->sc_pcitag = pa->pa_tag;
	sc->sc_pc = pa->pa_pc;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;

	pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_AGP,
	    &sc->sc_capoff, NULL);

	printf(": ");
	if (agp_map_aperture(pa, sc, aa->aa_bar, aa->aa_type) != 0) {
		printf("can't map aperture\n");
		sc->sc_chipc = NULL;
		return;
	}

	printf("aperture at 0x%lx, size 0x%lx\n", (u_long)sc->sc_apaddr,
	    (u_long)sc->sc_methods->get_aperture(sc->sc_chipc));
}

struct cfattach agp_ca = {
	sizeof(struct agp_softc), agp_probe, agp_attach,
	NULL, NULL
};

struct cfdriver agp_cd = {
	NULL, "agp", DV_DULL
};

paddr_t
agpmmap(void *v, off_t off, int prot)
{
	struct agp_softc *sc = (struct agp_softc *)v;

	if (sc->sc_apaddr) {
		if (off > sc->sc_methods->get_aperture(sc->sc_chipc))
			return (-1);

		/*
		 * XXX this should use bus_space_mmap() but it's not
		 * available on all archs.
		 */
		return atop(sc->sc_apaddr + off);
	}
	return (-1);
}

int
agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (!sc->sc_opened)
		sc->sc_opened = 1;
	else
		return (EBUSY);

	return (0);
}

int
agpioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *pb)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL)
		return (ENODEV);

	if (sc->sc_methods == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (cmd != AGPIOC_INFO && !(flag & FWRITE))
		return (EPERM);

	switch (cmd) {
	case AGPIOC_INFO:
		return (agp_info_user(sc, (agp_info *)addr));

	case AGPIOC_ACQUIRE:
		return (agp_acquire_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_RELEASE:
		return (agp_release_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_SETUP:
		return (agp_setup_user(sc, (agp_setup *)addr));

	case AGPIOC_ALLOCATE:
		return (agp_allocate_user(sc, (agp_allocate *)addr));

	case AGPIOC_DEALLOCATE:
		return (agp_deallocate_user(sc, *(int *)addr));

	case AGPIOC_BIND:
		return (agp_bind_user(sc, (agp_bind *)addr));

	case AGPIOC_UNBIND:
		return (agp_unbind_user(sc, (agp_unbind *)addr));

	default:
		return (ENOTTY);
	}
}
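
/*
 * Illustrative sketch (not part of the driver): the usual userland sequence
 * against the ioctls above, with error handling omitted and the device path
 * assumed to be /dev/agp0:
 *
 *	int fd = open("/dev/agp0", O_RDWR);
 *	agp_info info;
 *	agp_setup setup;
 *	agp_allocate alloc;
 *	agp_bind bind;
 *
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	setup.agp_mode = info.agp_mode;
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	alloc.type = 0;
 *	alloc.pg_count = 16;
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	bind.key = alloc.key;
 *	bind.pg_start = 0;
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */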

int
agpclose(dev_t dev, int flags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close.
	 */
	if (sc->sc_state == AGP_ACQUIRE_USER) {
		while ((mem = TAILQ_FIRST(&sc->sc_memory)) != 0) {
			if (mem->am_is_bound) {
				AGP_UNBIND_MEMORY(sc, mem);
			}
			AGP_FREE_MEMORY(sc, mem);
		}
		agp_release_helper(sc, AGP_ACQUIRE_USER);
	}
	sc->sc_opened = 0;

	return (0);
}

struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return (mem);
	}
	return (NULL);
}

int
agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc,
    u_int32_t bar, u_int32_t memtype)
{
	/* Find the aperture. Don't map it (yet); that would eat KVA. */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, bar, memtype,
	    &sc->sc_apaddr, NULL, NULL) != 0)
		return (ENXIO);

	return (0);
}

struct agp_gatt *
agp_alloc_gatt(bus_dma_tag_t dmat, u_int32_t apsize)
{
	struct agp_gatt	*gatt;
	u_int32_t	 entries = apsize >> AGP_PAGE_SHIFT;
	int		 nseg;

	gatt = malloc(sizeof(*gatt), M_AGP, M_NOWAIT | M_ZERO);
	if (!gatt)
		return (NULL);
	gatt->ag_entries = entries;

	if (agp_alloc_dmamem(dmat, entries * sizeof(u_int32_t),
	    0, &gatt->ag_dmamap, (caddr_t *)&gatt->ag_virtual,
	    &gatt->ag_physical, &gatt->ag_dmaseg, 1, &nseg) != 0) {
		free(gatt, M_AGP);
		return (NULL);
	}

	gatt->ag_size = entries * sizeof(u_int32_t);
	memset(gatt->ag_virtual, 0, gatt->ag_size);
	agp_flush_cache();

	return (gatt);
}
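
/*
 * Size example: a 256MB aperture with the usual 4KB AGP page size needs
 * 65536 GATT entries of 4 bytes each, i.e. a 256KB table, allocated above
 * as a single DMA segment.
 */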

void
agp_free_gatt(bus_dma_tag_t dmat, struct agp_gatt *gatt)
{
	agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap,
	    (caddr_t)gatt->ag_virtual, &gatt->ag_dmaseg, 1);
	free(gatt, M_AGP);
}

int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	struct pci_attach_args	pa;
	pcireg_t		tstatus, mstatus, command;
	int			rq, sba, fw, rate, capoff;

	if (pci_find_device(&pa, agpvga_match) == 0 ||
	    pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("agp_generic_enable: not an AGP capable device\n");
		return (-1);
	}

	tstatus = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_STATUS);
	/* agp status of the display card (the master) */
	mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);

	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_COMMAND, command);
	pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);
	return (0);
}
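
/*
 * Worked example of the negotiation above: if the bridge (tstatus)
 * advertises 4x/2x/1x with SBA, the card (mstatus) only 2x/1x without SBA,
 * and the caller's mode allows every rate, then the common rate mask is
 * 2x|1x, so 2x is selected, SBA stays off, and RQ is clamped to the
 * smallest of the three request-queue depths before the command word is
 * written to both devices.
 */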

struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
	struct agp_memory *mem;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n", type);
		return (NULL);
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK | M_ZERO);

	if (bus_dmamap_create(sc->sc_dmat, size, size / PAGE_SIZE + 1,
	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
		free(mem, M_AGP);
		return (NULL);
	}

	mem->am_id = sc->sc_nextid++;
	mem->am_size = size;
	TAILQ_INSERT_TAIL(&sc->sc_memory, mem, am_link);
	sc->sc_allocated += size;

	return (mem);
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	if (mem->am_is_bound)
		return (EBUSY);

	sc->sc_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->sc_memory, mem, am_link);
	bus_dmamap_destroy(sc->sc_dmat, mem->am_dmamap);
	free(mem, M_AGP);
	return (0);
}

int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
			off_t offset)
{
	bus_dma_segment_t *segs, *seg;
	bus_size_t done, j;
	bus_addr_t pa;
	off_t i, k;
	int nseg, error;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound) {
		printf("AGP: memory already bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size >
	    sc->sc_methods->get_aperture(sc->sc_chipc)) {
		printf("AGP: binding memory at bad offset %#lx\n",
		    (unsigned long) offset);
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * The memory here needs to be directly accessible from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 */

	nseg = (mem->am_size + PAGE_SIZE - 1) / PAGE_SIZE;
	segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, mem->am_size, PAGE_SIZE, 0,
	    segs, nseg, &mem->am_nseg, BUS_DMA_WAITOK)) != 0) {
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_alloc failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, segs, mem->am_nseg,
	    mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_map failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, mem->am_dmamap,
	    mem->am_virtual, mem->am_size, NULL,
	    BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual,
		    mem->am_size);
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamap_load failed %d\n", error);
		return (error);
	}
	mem->am_dmaseg = segs;

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			pa = seg->ds_addr + j;
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)pa);
			error = sc->sc_methods->bind_page(sc->sc_chipc,
			    offset + done + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
					sc->sc_methods->unbind_page(
					    sc->sc_chipc, offset + k);

				bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
				bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual,
				    mem->am_size);
				bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg,
				    mem->am_nseg);
				free(mem->am_dmaseg, M_AGP);
				rw_exit_write(&sc->sc_lock);
				AGP_DPF("AGP_BIND_PAGE failed %d\n", error);
				return (error);
			}
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	rw_exit_write(&sc->sc_lock);

	return (0);
}
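
/*
 * For example, binding a 64KB allocation that bus_dma returned as two 32KB
 * segments installs 16 GATT entries in total above (8 per segment, one per
 * AGP_PAGE_SIZE page, normally 4KB), each mapping offset + done + j to the
 * segment's physical address plus j.
 */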

int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	int i;

	rw_enter_write(&sc->sc_lock);

	if (!mem->am_is_bound) {
		printf("AGP: memory is not bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		sc->sc_methods->unbind_page(sc->sc_chipc, mem->am_offset + i);

	agp_flush_cache();
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, mem->am_virtual, mem->am_size);
	bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
    bus_dmamap_t *mapp, caddr_t *vaddr, bus_addr_t *baddr,
    bus_dma_segment_t *seg, int nseg, int *rseg)
{
	int error, level = 0;

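	/*
	 * level counts how many of the setup steps below (dmamem_alloc,
	 * dmamem_map, dmamap_create) have succeeded, so the error path
	 * only tears down what was actually set up.
	 */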
	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr,
	    BUS_DMA_NOWAIT | flags)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_create(tag, size, *rseg, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto out;

	*baddr = (*mapp)->dm_segs[0].ds_addr;

	return (0);
out:
	switch (level) {
	case 3:
		bus_dmamap_destroy(tag, *mapp);
		/* FALLTHROUGH */
	case 2:
		bus_dmamem_unmap(tag, *vaddr, size);
		/* FALLTHROUGH */
	case 1:
		bus_dmamem_free(tag, seg, *rseg);
		break;
	default:
		break;
	}

	return (error);
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    caddr_t vaddr, bus_dma_segment_t *seg, int nseg)
{
	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_unmap(tag, vaddr, size);
	bus_dmamem_free(tag, seg, nseg);
}

/* Helper functions used in both user and kernel APIs */
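/*
 * agp_acquire_helper() and agp_release_helper() implement a small ownership
 * state machine: sc_state goes from AGP_ACQUIRE_FREE to either
 * AGP_ACQUIRE_USER or AGP_ACQUIRE_KERNEL, and only the owner that acquired
 * the bus can release it; releasing also unbinds anything still bound.
 */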

int
agp_acquire_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_chipc == NULL)
		return (EINVAL);

	if (sc->sc_state != AGP_ACQUIRE_FREE)
		return (EBUSY);
	sc->sc_state = state;

	return (0);
}

int
agp_release_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;
	struct agp_memory *mem;

	if (sc->sc_state == AGP_ACQUIRE_FREE)
		return (0);

	if (sc->sc_state != state)
		return (EBUSY);

	/*
	 * Clear out the aperture and free any
	 * outstanding memory blocks.
	 */
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound) {
			printf("agp_release_helper: mem %d is bound\n",
			    mem->am_id);
			AGP_UNBIND_MEMORY(sc, mem);
		}
	}
	sc->sc_state = AGP_ACQUIRE_FREE;
	return (0);
}

/* Implementation of the userland ioctl API */

int
agp_info_user(void *dev, agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *) dev;

	if (!sc->sc_chipc)
		return (ENXIO);

	bzero(info, sizeof *info);
	info->bridge_id = sc->sc_id;
	if (sc->sc_capoff != 0)
		info->agp_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->agp_mode = 0; /* i810 doesn't have real AGP */
	info->aper_base = sc->sc_apaddr;
	info->aper_size = sc->sc_methods->get_aperture(sc->sc_chipc) >> 20;
	info->pg_total =
	info->pg_system = sc->sc_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->sc_allocated >> AGP_PAGE_SHIFT;

	return (0);
}
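
/*
 * Example of the unit conversions above: with a 128MB aperture and a
 * sc_maxmem of 440MB, userland sees aper_size == 128 (megabytes) and
 * pg_system == 112640 (AGP pages, normally 4KB each); pg_used is likewise
 * reported in AGP pages rather than bytes.
 */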

int
agp_setup_user(void *dev, agp_setup *setup)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (AGP_ENABLE(sc, setup->agp_mode));
}

int
agp_allocate_user(void *dev, agp_allocate *alloc)
{
	struct agp_softc *sc = (struct agp_softc *)dev;
	struct agp_memory *mem;
	size_t size = alloc->pg_count << AGP_PAGE_SHIFT;

	if (sc->sc_allocated + size > sc->sc_maxmem)
		return (EINVAL);

	mem = AGP_ALLOC_MEMORY(sc, alloc->type, size);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return (0);
	} else
		return (ENOMEM);
}

int
agp_deallocate_user(void *dev, int id)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = agp_find_memory(sc, id);

	if (mem) {
		AGP_FREE_MEMORY(sc, mem);
		return (0);
	} else
		return (ENOENT);
}

int
agp_bind_user(void *dev, agp_bind *bind)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = agp_find_memory(sc, bind->key);

	if (!mem)
		return (ENOENT);

	return (AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT));
}

int
agp_unbind_user(void *dev, agp_unbind *unbind)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = agp_find_memory(sc, unbind->key);

	if (!mem)
		return (ENOENT);

	return (AGP_UNBIND_MEMORY(sc, mem));
}

/* Implementation of the kernel API */

void *
agp_find_device(int unit)
{
	if (unit >= agp_cd.cd_ndevs || unit < 0)
		return (NULL);
	return (agp_cd.cd_devs[unit]);
}

enum agp_acquire_state
agp_state(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *) dev;

	return (sc->sc_state);
}

void
agp_get_info(void *dev, struct agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_capoff != 0)
		info->ai_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->ai_mode = 0; /* i810 doesn't have real AGP */
	info->ai_aperture_base = sc->sc_apaddr;
	info->ai_aperture_size = sc->sc_methods->get_aperture(sc->sc_chipc);
	info->ai_memory_allowed = sc->sc_maxmem;
	info->ai_memory_used = sc->sc_allocated;
}

int
agp_acquire(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_acquire_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_release(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_release_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_enable(void *dev, u_int32_t mode)
{
	struct agp_softc *sc = (struct agp_softc *) dev;

	return (AGP_ENABLE(sc, mode));
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (AGP_ALLOC_MEMORY(sc, type, bytes));
}

void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	AGP_FREE_MEMORY(sc, mem);
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	return (AGP_BIND_MEMORY(sc, mem, offset));
}

int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc *sc = (struct agp_softc *) dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	return (AGP_UNBIND_MEMORY(sc, mem));
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
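
/*
 * Illustrative sketch (not part of the driver): how an in-kernel consumer
 * such as a drm driver would typically use the API above.  Error handling
 * is omitted and the variables are hypothetical:
 *
 *	void *agpdev, *handle;
 *	struct agp_info ai;
 *
 *	agpdev = agp_find_device(0);
 *	if (agpdev != NULL && agp_acquire(agpdev) == 0) {
 *		agp_get_info(agpdev, &ai);
 *		agp_enable(agpdev, ai.ai_mode);
 *		handle = agp_alloc_memory(agpdev, 0, 64 * 1024);
 *		agp_bind_memory(agpdev, handle, 0);
 *		...
 *		agp_unbind_memory(agpdev, handle);
 *		agp_free_memory(agpdev, handle);
 *		agp_release(agpdev);
 *	}
 */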
955