/* $OpenBSD: agp.c,v 1.45 2014/07/13 23:10:23 deraadt Exp $ */
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <uvm/uvm_extern.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>

/*
 * The enable and {alloc, free, bind, unbind} memory routines have default
 * fallbacks; these wrappers do the right thing. Callbacks with no fallback
 * are called directly. These are mostly hacks around the weirdness of Intel
 * integrated graphics, which are not technically a true AGP chipset but
 * provide an almost identical interface.
 */
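/*
 * Illustrative sketch only (the method names match how struct agp_methods
 * is used in this file; the agp_foo_* functions are hypothetical): a
 * chipset driver fills in a method table and hands the bus off to us:
 *
 *	static const struct agp_methods agp_foo_methods = {
 *		.bind_page	= agp_foo_bind_page,
 *		.unbind_page	= agp_foo_unbind_page,
 *		.flush_tlb	= agp_foo_flush_tlb,
 *		.enable		= agp_foo_enable,	(NULL means generic)
 *	};
 *	...
 *	agp_attach_bus(pa, &agp_foo_methods, apaddr, apsize, &sc->sc_dev);
 *
 * Methods left NULL fall back to the agp_generic_*() versions below where
 * such a fallback exists.
 */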
int	agp_generic_enable(struct agp_softc *, u_int32_t);
struct agp_memory *
	agp_generic_alloc_memory(struct agp_softc *, int, vsize_t);
int	agp_generic_free_memory(struct agp_softc *, struct agp_memory *);
void	agp_attach(struct device *, struct device *, void *);
int	agp_probe(struct device *, void *, void *);

struct agp_memory *agp_find_memory(struct agp_softc *, int);
struct agp_memory *agp_lookup_memory(struct agp_softc *, off_t);

int	agpvga_match(struct pci_attach_args *);
int	agp_acquire_helper(void *, enum agp_acquire_state);
int	agp_release_helper(void *, enum agp_acquire_state);

int
agpdev_print(void *aux, const char *pnp)
{
	if (pnp) {
		printf("agp at %s", pnp);
	}
	return (UNCONF);
}

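/*
 * Match helper for the chipset drivers: succeed only for a PCI host
 * bridge that is being offered to the "agp" interface.
 */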
int
agpbus_probe(struct agp_attach_args *aa)
{
	struct pci_attach_args	*pa = aa->aa_pa;

	if (strncmp(aa->aa_busname, "agp", 3) == 0 &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
		return (1);
	return (0);
}

/*
 * Find the video card hanging off the AGP bus. XXX assumes only one bus.
 */
int
agpvga_match(struct pci_attach_args *pa)
{
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA) {
		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
		    NULL, NULL))
			return (1);
	}
	return (0);
}

struct device *
agp_attach_bus(struct pci_attach_args *pa, const struct agp_methods *methods,
    bus_addr_t apaddr, bus_size_t apsize, struct device *dev)
{
	struct agpbus_attach_args arg;

	arg.aa_methods = methods;
	arg.aa_pa = pa;
	arg.aa_apaddr = apaddr;
	arg.aa_apsize = apsize;

	printf("\n"); /* finish the attach line of the driver that called us */
	return (config_found(dev, &arg, agpdev_print));
}

int
agp_probe(struct device *parent, void *match, void *aux)
{
	/*
	 * No checking is done here; the driver we're attaching this
	 * interface to should have already done it.
	 */
	return (1);
}

void
agp_attach(struct device *parent, struct device *self, void *aux)
{
	static const int agp_max[][2] = {
		{0,		0},
		{32,		4},
		{64,		28},
		{128,		96},
		{256,		204},
		{512,		440},
		{1024,		942},
		{2048,		1920},
		{4096,		3932}
	};
	struct agpbus_attach_args *aa = aux;
	struct pci_attach_args *pa = aa->aa_pa;
	struct agp_softc *sc = (struct agp_softc *)self;
	u_int memsize;
	int i;

	sc->sc_chipc = parent;
	sc->sc_methods = aa->aa_methods;
	sc->sc_apaddr = aa->aa_apaddr;
	sc->sc_apsize = aa->aa_apsize;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
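	/*
	 * For instance, with about 512MB of physical memory
	 * (memsize == 512) the loop below stops at the {512, 440}
	 * entry, since 512 is not greater than 512, allowing up to
	 * 440MB of AGP memory.
	 */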
	memsize = ptoa(physmem) >> 20;

	for (i = 0; i < nitems(agp_max) && memsize > agp_max[i][0]; i++)
		;
	if (i == nitems(agp_max))
		i = nitems(agp_max) - 1;
	sc->sc_maxmem = agp_max[i][1] << 20;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	rw_init(&sc->sc_lock, "agplk");

	TAILQ_INIT(&sc->sc_memory);

	sc->sc_pcitag = pa->pa_tag;
	sc->sc_pc = pa->pa_pc;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_memt = pa->pa_memt;

	pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_AGP,
	    &sc->sc_capoff, NULL);

	printf(": aperture at 0x%lx, size 0x%lx\n", (u_long)sc->sc_apaddr,
	    (u_long)sc->sc_apsize);
}

struct cfattach agp_ca = {
	sizeof(struct agp_softc), agp_probe, agp_attach,
	NULL, NULL
};

struct cfdriver agp_cd = {
	NULL, "agp", DV_DULL
};

struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return (mem);
	}
	return (NULL);
}

struct agp_memory *
agp_lookup_memory(struct agp_softc *sc, off_t off)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory offset 0x%lx\n", (unsigned long)off);
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound == 0)
			continue;
		if (off >= mem->am_offset &&
		    off < (mem->am_offset + mem->am_size))
			return (mem);
	}
	return (NULL);
}

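/*
 * Allocate a Graphics Address Translation Table for an aperture of
 * apsize bytes: one u_int32_t entry per AGP page, kept in a single
 * block of DMA-able memory that the chipset can read.
 */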
struct agp_gatt *
agp_alloc_gatt(bus_dma_tag_t dmat, u_int32_t apsize)
{
	struct agp_gatt	*gatt;
	u_int32_t	 entries = apsize >> AGP_PAGE_SHIFT;

	gatt = malloc(sizeof(*gatt), M_AGP, M_NOWAIT | M_ZERO);
	if (!gatt)
		return (NULL);
	gatt->ag_entries = entries;
	gatt->ag_size = entries * sizeof(u_int32_t);

	if (agp_alloc_dmamem(dmat, gatt->ag_size, &gatt->ag_dmamap,
	    &gatt->ag_physical, &gatt->ag_dmaseg) != 0) {
		free(gatt, M_AGP, 0);
		return (NULL);
	}

	if (bus_dmamem_map(dmat, &gatt->ag_dmaseg, 1, gatt->ag_size,
	    (caddr_t *)&gatt->ag_virtual, BUS_DMA_NOWAIT) != 0) {
		agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap,
		    &gatt->ag_dmaseg);
		free(gatt, M_AGP, 0);
		return (NULL);
	}

	agp_flush_cache();

	return (gatt);
}

void
agp_free_gatt(bus_dma_tag_t dmat, struct agp_gatt *gatt)
{
	bus_dmamem_unmap(dmat, (caddr_t)gatt->ag_virtual, gatt->ag_size);
	agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap, &gatt->ag_dmaseg);
	free(gatt, M_AGP, 0);
}

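/*
 * Negotiate the AGP mode between the bridge (target), the video card
 * (master) and the caller's requested mode: RQ becomes the minimum of
 * the three, SBA and FW are enabled only if all three support them, and
 * the highest commonly supported rate is chosen. The result is written
 * to both devices' AGP_COMMAND registers.
 */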
int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	struct pci_attach_args	pa;
	pcireg_t		tstatus, mstatus, command;
	int			rq, sba, fw, rate, capoff;

	if (pci_find_device(&pa, agpvga_match) == 0 ||
	    pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("agp_generic_enable: not an AGP capable device\n");
		return (-1);
	}

	tstatus = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_STATUS);
	/* the display card's (master's) agp status */
	mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);

	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_COMMAND, command);
	pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);
	return (0);
}

struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
	struct agp_memory *mem;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n", type);
		return (NULL);
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK | M_ZERO);

	if (bus_dmamap_create(sc->sc_dmat, size, size / PAGE_SIZE + 1,
	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
		free(mem, M_AGP, 0);
		return (NULL);
	}

	mem->am_id = sc->sc_nextid++;
	mem->am_size = size;
	TAILQ_INSERT_TAIL(&sc->sc_memory, mem, am_link);
	sc->sc_allocated += size;

	return (mem);
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	if (mem->am_is_bound)
		return (EBUSY);

	sc->sc_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->sc_memory, mem, am_link);
	bus_dmamap_destroy(sc->sc_dmat, mem->am_dmamap);
	free(mem, M_AGP, 0);
	return (0);
}

int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
    bus_size_t offset)
{
	bus_dma_segment_t	*segs, *seg;
	bus_addr_t		 apaddr = sc->sc_apaddr + offset;
	bus_size_t		 done, i, j;
	int			 nseg, error;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound) {
		printf("AGP: memory already bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > sc->sc_apsize) {
		printf("AGP: binding memory at bad offset %#lx\n",
		    (unsigned long)offset);
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * The memory here needs to be directly accessible from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 */

	nseg = (mem->am_size + PAGE_SIZE - 1) / PAGE_SIZE;
	segs = mallocarray(nseg, sizeof *segs, M_AGP, M_WAITOK);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, mem->am_size, PAGE_SIZE, 0,
	    segs, nseg, &mem->am_nseg, BUS_DMA_ZERO | BUS_DMA_WAITOK)) != 0) {
		free(segs, M_AGP, 0);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_alloc failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load_raw(sc->sc_dmat, mem->am_dmamap, segs,
	    mem->am_nseg, mem->am_size, BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP, 0);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamap_load_raw failed %d\n", error);
		return (error);
	}
	mem->am_dmaseg = segs;

	/*
	 * Install entries in the GATT, making sure that if
	 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
	 * aligned to PAGE_SIZE, we don't modify too many GATT
	 * entries. Flush the chipset TLB when done.
	 */
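	/*
	 * For instance, on a platform where PAGE_SIZE is 8192, a 4096
	 * byte am_size may come back as a single 8KB DMA segment, but
	 * only one 4KB GATT entry must be written; the (done + j) <
	 * am_size test below stops the inner loop after that entry.
	 */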
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)seg->ds_addr + j);
			sc->sc_methods->bind_page(sc->sc_chipc,
			    apaddr + done + j, seg->ds_addr + j, 0);
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	bus_addr_t	apaddr = sc->sc_apaddr + mem->am_offset;
	bus_size_t	i;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound == 0) {
		printf("AGP: memory is not bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	if (mem->am_mapref > 0) {
		printf("AGP: memory is mapped\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		sc->sc_methods->unbind_page(sc->sc_chipc, apaddr + i);

	agp_flush_cache();
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
	bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP, 0);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

/*
 * Allocates a single-segment block of zeroed, wired dma memory.
 */
int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t *mapp,
    bus_addr_t *baddr, bus_dma_segment_t *seg)
{
	int error, level = 0, nseg;
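	/*
	 * "level" counts the setup steps completed so far, so the
	 * error path below unwinds only what was actually allocated.
	 */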

	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, 1, &nseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_create(tag, size, nseg, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_load_raw(tag, *mapp, seg, nseg, size,
	    BUS_DMA_NOWAIT)) != 0)
		goto out;

	*baddr = (*mapp)->dm_segs[0].ds_addr;

	return (0);
out:
	switch (level) {
	case 2:
		bus_dmamap_destroy(tag, *mapp);
		/* FALLTHROUGH */
	case 1:
		bus_dmamem_free(tag, seg, nseg);
		break;
	default:
		break;
	}

	return (error);
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    bus_dma_segment_t *seg)
{
	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_free(tag, seg, 1);
}

/* Helper functions used in both user and kernel APIs */

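/*
 * The bridge can have at most one owner at a time; sc_state records the
 * current owner and is AGP_ACQUIRE_FREE when nobody holds it.
 */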
int
agp_acquire_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_chipc == NULL)
		return (EINVAL);

	if (sc->sc_state != AGP_ACQUIRE_FREE)
		return (EBUSY);
	sc->sc_state = state;

	return (0);
}

int
agp_release_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;
	struct agp_memory *mem;

	if (sc->sc_state == AGP_ACQUIRE_FREE)
		return (0);

	if (sc->sc_state != state)
		return (EBUSY);

	/*
	 * Clear out the aperture by unbinding any
	 * outstanding memory blocks.
	 */
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound) {
			printf("agp_release_helper: mem %d is bound\n",
			    mem->am_id);
			agp_unbind_memory(sc, mem);
		}
	}
	sc->sc_state = AGP_ACQUIRE_FREE;
	return (0);
}

/* Implementation of the kernel API */

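/*
 * A typical in-kernel consumer (everything other than the agp_* calls
 * below is illustrative only) drives this interface roughly as follows:
 *
 *	void *agpdev, *handle;
 *
 *	agpdev = agp_find_device(0);
 *	if (agpdev != NULL && agp_acquire(agpdev) == 0) {
 *		agp_enable(agpdev, mode);
 *		handle = agp_alloc_memory(agpdev, 0, size);
 *		if (handle != NULL)
 *			agp_bind_memory(agpdev, handle, offset);
 *		...
 *		agp_release(agpdev);
 *	}
 */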
void *
agp_find_device(int unit)
{
	if (unit >= agp_cd.cd_ndevs || unit < 0)
		return (NULL);
	return (agp_cd.cd_devs[unit]);
}

enum agp_acquire_state
agp_state(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (sc->sc_state);
}

void
agp_get_info(void *dev, struct agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_capoff != 0)
		info->ai_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->ai_mode = 0; /* i810 doesn't have real AGP */
	info->ai_aperture_base = sc->sc_apaddr;
	info->ai_aperture_size = sc->sc_apsize;
	info->ai_memory_allowed = sc->sc_maxmem;
	info->ai_memory_used = sc->sc_allocated;
	info->ai_devid = sc->sc_id;
}

int
agp_acquire(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_acquire_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_release(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_release_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_enable(void *dev, u_int32_t mode)
{
	struct agp_softc	*sc = dev;
	int			 ret;

	if (sc->sc_methods->enable != NULL) {
		ret = sc->sc_methods->enable(sc->sc_chipc, mode);
	} else {
		ret = agp_generic_enable(sc, mode);
	}
	return (ret);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if (sc->sc_methods->alloc_memory != NULL) {
		mem = sc->sc_methods->alloc_memory(sc->sc_chipc, type, bytes);
	} else {
		mem = agp_generic_alloc_memory(sc, type, bytes);
	}
	return (mem);
}

void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = handle;

	if (sc->sc_methods->free_memory != NULL) {
		sc->sc_methods->free_memory(sc->sc_chipc, mem);
	} else {
		agp_generic_free_memory(sc, mem);
	}
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->bind_memory != NULL) {
		ret = sc->sc_methods->bind_memory(sc->sc_chipc, mem, offset);
	} else {
		ret = agp_generic_bind_memory(sc, mem, offset);
	}
	return (ret);
}

int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->unbind_memory != NULL) {
		ret = sc->sc_methods->unbind_memory(sc->sc_chipc, mem);
	} else {
		ret = agp_generic_unbind_memory(sc, mem);
	}
	return (ret);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *)handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}

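/*
 * Map size bytes of the aperture starting at address for CPU access.
 * Bridges with a real aperture are mapped through bus_space; bridges
 * that don't remap processor accesses (sc_apaddr == 0) hand back the
 * kernel mapping of the bound memory itself.
 */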
void *
agp_map(struct agp_softc *sc, bus_size_t address, bus_size_t size,
    bus_space_handle_t *memh)
{
	struct agp_memory *mem;

	if (sc->sc_chipc == NULL)
		return (NULL);

	if (address >= sc->sc_apsize)
		return (NULL);

	if (sc->sc_apaddr) {
		if (bus_space_map(sc->sc_memt, sc->sc_apaddr + address, size,
		    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, memh))
			return (NULL);
	} else {
		/*
		 * If the aperture base address is 0 assume that the AGP
		 * bridge does not support remapping for processor accesses.
		 */
		mem = agp_lookup_memory(sc, address);
		if (mem == NULL)
			return (NULL);

		/*
		 * Map the whole memory region because it is easier to
		 * do so and it is improbable that only a part of it
		 * will be used.
		 */
		if (mem->am_mapref == 0)
			if (bus_dmamem_map(sc->sc_dmat, mem->am_dmaseg,
			    mem->am_nseg, mem->am_size, &mem->am_kva,
			    BUS_DMA_NOWAIT | BUS_DMA_NOCACHE))
				return (NULL);

		mem->am_mapref++;

		/*
		 * XXX Fake a bus handle even if it is managed memory,
		 * this is needed at least by radeondrm(4).
		 */
		*memh = (bus_space_handle_t)(mem->am_kva + address);
	}

	return (bus_space_vaddr(sc->sc_memt, *memh));
}

void
agp_unmap(struct agp_softc *sc, void *address, size_t size,
    bus_space_handle_t memh)
{
	struct agp_memory *mem;
	caddr_t kva;

	if (sc->sc_apaddr) {
		bus_space_unmap(sc->sc_memt, memh, size);
		return;
	}

	kva = (caddr_t)address;
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound == 0)
			continue;

		if (kva >= mem->am_kva && kva < (mem->am_kva + mem->am_size)) {
			mem->am_mapref--;

			if (mem->am_mapref == 0) {
				bus_dmamem_unmap(sc->sc_dmat, mem->am_kva,
				    mem->am_size);
				mem->am_kva = 0;
			}
			break;
		}
	}
}

paddr_t
agp_mmap(struct agp_softc *sc, off_t off, int prot)
{
	struct agp_memory *mem;

	if (sc->sc_chipc == NULL)
		return (-1);

	if (off >= sc->sc_apsize)
		return (-1);

	if (sc->sc_apaddr)
		return (bus_space_mmap(sc->sc_memt, sc->sc_apaddr, off,
		    prot, 0));

	mem = agp_lookup_memory(sc, off);
	if (mem == NULL)
		return (-1);

	return (bus_dmamem_mmap(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg, off,
	    prot, BUS_DMA_NOCACHE));
}
824