/* $OpenBSD: agp.c,v 1.32 2009/05/10 16:57:44 oga Exp $ */
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/agpio.h>
#include <sys/fcntl.h>
#include <sys/ioctl.h>

#include <uvm/uvm.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/mc6845reg.h>
#include <dev/ic/pcdisplayvar.h>
#include <dev/ic/vgareg.h>
#include <dev/ic/vgavar.h>

#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>

/*
 * The enable and {alloc, free, bind, unbind} memory routines have default
 * fallbacks; the wrapper functions below pick the right implementation,
 * while callbacks with no fallback are called directly.  This is mostly a
 * workaround for the weirdness of Intel integrated graphics, which is not
 * technically a true AGP chipset but provides an almost identical interface.
 */
int	agp_generic_enable(struct agp_softc *, u_int32_t);
struct agp_memory *
	agp_generic_alloc_memory(struct agp_softc *, int, vsize_t);
int	agp_generic_free_memory(struct agp_softc *, struct agp_memory *);
void	agp_attach(struct device *, struct device *, void *);
int	agp_probe(struct device *, void *, void *);
int	agpbusprint(void *, const char *);
paddr_t	agpmmap(void *, off_t, int);
int	agpioctl(dev_t, u_long, caddr_t, int, struct proc *);
int	agpopen(dev_t, int, int, struct proc *);
int	agpclose(dev_t, int, int, struct proc *);

struct agp_memory *agp_find_memory(struct agp_softc *sc, int id);
int	agpvga_match(struct pci_attach_args *);
/* userland ioctl functions */
int	agp_info_user(void *, agp_info *);
int	agp_setup_user(void *, agp_setup *);
int	agp_allocate_user(void *, agp_allocate *);
int	agp_deallocate_user(void *, int);
int	agp_bind_user(void *, agp_bind *);
int	agp_unbind_user(void *, agp_unbind *);
int	agp_acquire_helper(void *dev, enum agp_acquire_state state);
int	agp_release_helper(void *dev, enum agp_acquire_state state);

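/*
 * Autoconf print function for the agp device attaching under a chipset
 * driver.
 */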
int
agpdev_print(void *aux, const char *pnp)
{
	if (pnp) {
		printf("agp at %s", pnp);
	}
	return (UNCONF);
}

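/*
 * Check whether the device described by the attach args is a candidate
 * for an agp driver: it must be attached at the "agp" busname and be a
 * PCI host bridge.
 */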
int
agpbus_probe(struct agp_attach_args *aa)
{
	struct pci_attach_args	*pa = aa->aa_pa;

	if (strncmp(aa->aa_busname, "agp", 3) == 0 &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_BRIDGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_BRIDGE_HOST)
		return (1);
	return (0);
}

/*
 * Find the video card hanging off the agp bus.  XXX assumes only one bus.
 */
int
agpvga_match(struct pci_attach_args *pa)
{
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA) {
		if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
		    NULL, NULL))
			return (1);
	}
	return (0);
}

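/*
 * Called by a chipset driver once it has worked out the aperture: package
 * up the methods and aperture details and attach the generic agp device
 * as a child via config_found().
 */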
struct device *
agp_attach_bus(struct pci_attach_args *pa, const struct agp_methods *methods,
    bus_addr_t apaddr, bus_size_t apsize, struct device *dev)
{
	struct agpbus_attach_args arg;

	arg.aa_methods = methods;
	arg.aa_pa = pa;
	arg.aa_apaddr = apaddr;
	arg.aa_apsize = apsize;

	printf("\n"); /* newline from the driver that called us */
	return (config_found(dev, &arg, agpdev_print));
}

int
agp_probe(struct device *parent, void *match, void *aux)
{
	/*
	 * We don't do any checking here; the driver we're attaching this
	 * interface to should have already done it.
	 */
	return (1);
}

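/*
 * Attach the generic agp device: record the chipset methods and aperture,
 * work out how much memory we are willing to hand out, and locate the AGP
 * capability of the host bridge.
 */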
void
agp_attach(struct device *parent, struct device *self, void *aux)
{
	struct agpbus_attach_args *aa = aux;
	struct pci_attach_args *pa = aa->aa_pa;
	struct agp_softc *sc = (struct agp_softc *)self;
	static const int agp_max[][2] = {
		{0,		0},
		{32,		4},
		{64,		28},
		{128,		96},
		{256,		204},
		{512,		440},
		{1024,		942},
		{2048,		1920},
		{4096,		3932}
	};
#define	agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
	u_int memsize;
	int i;

	sc->sc_chipc = parent;
	sc->sc_methods = aa->aa_methods;
	sc->sc_apaddr = aa->aa_apaddr;
	sc->sc_apsize = aa->aa_apsize;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(physmem) >> 20;

	for (i = 0; i < agp_max_size && memsize > agp_max[i][0]; i++)
		;
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->sc_maxmem = agp_max[i][1] << 20;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	rw_init(&sc->sc_lock, "agplk");

	TAILQ_INIT(&sc->sc_memory);

	sc->sc_pcitag = pa->pa_tag;
	sc->sc_pc = pa->pa_pc;
	sc->sc_id = pa->pa_id;
	sc->sc_dmat = pa->pa_dmat;

	pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_AGP,
	    &sc->sc_capoff, NULL);

	printf(": aperture at 0x%lx, size 0x%lx\n", (u_long)sc->sc_apaddr,
	    (u_long)sc->sc_apsize);
}

struct cfattach agp_ca = {
	sizeof(struct agp_softc), agp_probe, agp_attach,
	NULL, NULL
};

struct cfdriver agp_cd = {
	NULL, "agp", DV_DULL
};

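/*
 * Device mmap entry point: translate an offset within the aperture into
 * a physical page number for userland mappings of the agp device.
 */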
paddr_t
agpmmap(void *v, off_t off, int prot)
{
	struct agp_softc *sc = (struct agp_softc *)v;

	if (sc->sc_apaddr) {
		if (off > sc->sc_apsize)
			return (-1);

		/*
		 * XXX this should use bus_space_mmap() but it's not
		 * available on all archs.
		 */
		return atop(sc->sc_apaddr + off);
	}
	return (-1);
}

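/*
 * Device open entry point; only a single opener is allowed at a time.
 */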
int
agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (sc->sc_opened)
		return (EBUSY);
	sc->sc_opened = 1;

	return (0);
}

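/*
 * Device ioctl entry point: dispatch the AGPIOC_* commands that make up
 * the userland API.  Everything except AGPIOC_INFO requires the device
 * to have been opened for writing.
 */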
int
agpioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *pb)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));

	if (sc == NULL)
		return (ENODEV);

	if (sc->sc_methods == NULL || sc->sc_chipc == NULL)
		return (ENXIO);

	if (cmd != AGPIOC_INFO && !(flag & FWRITE))
		return (EPERM);

	switch (cmd) {
	case AGPIOC_INFO:
		return (agp_info_user(sc, (agp_info *)addr));

	case AGPIOC_ACQUIRE:
		return (agp_acquire_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_RELEASE:
		return (agp_release_helper(sc, AGP_ACQUIRE_USER));

	case AGPIOC_SETUP:
		return (agp_setup_user(sc, (agp_setup *)addr));

	case AGPIOC_ALLOCATE:
		return (agp_allocate_user(sc, (agp_allocate *)addr));

	case AGPIOC_DEALLOCATE:
		return (agp_deallocate_user(sc, *(int *)addr));

	case AGPIOC_BIND:
		return (agp_bind_user(sc, (agp_bind *)addr));

	case AGPIOC_UNBIND:
		return (agp_unbind_user(sc, (agp_unbind *)addr));

	default:
		return (ENOTTY);
	}
}

int
agpclose(dev_t dev, int flags, int devtype, struct proc *p)
{
	struct agp_softc *sc = agp_find_device(AGPUNIT(dev));
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close.
	 */
	if (sc->sc_state == AGP_ACQUIRE_USER) {
		while ((mem = TAILQ_FIRST(&sc->sc_memory)) != NULL) {
			if (mem->am_is_bound)
				agp_unbind_memory(sc, mem);
			agp_free_memory(sc, mem);
		}
		agp_release_helper(sc, AGP_ACQUIRE_USER);
	}
	sc->sc_opened = 0;

	return (0);
}

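/*
 * Look up a previously allocated memory block by its id.
 */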
struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return (mem);
	}
	return (NULL);
}

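/*
 * Allocate a GATT (graphics address translation table) large enough to
 * cover an aperture of apsize bytes: one 32-bit entry per AGP page, in
 * DMA-safe memory that the chipset can read.
 */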
struct agp_gatt *
agp_alloc_gatt(bus_dma_tag_t dmat, u_int32_t apsize)
{
	struct agp_gatt		*gatt;
	u_int32_t		 entries = apsize >> AGP_PAGE_SHIFT;

	gatt = malloc(sizeof(*gatt), M_AGP, M_NOWAIT | M_ZERO);
	if (!gatt)
		return (NULL);
	gatt->ag_entries = entries;
	gatt->ag_size = entries * sizeof(u_int32_t);

	if (agp_alloc_dmamem(dmat, gatt->ag_size, &gatt->ag_dmamap,
	    &gatt->ag_physical, &gatt->ag_dmaseg) != 0) {
		free(gatt, M_AGP);
		return (NULL);
	}

	if (bus_dmamem_map(dmat, &gatt->ag_dmaseg, 1, gatt->ag_size,
	    (caddr_t *)&gatt->ag_virtual, BUS_DMA_NOWAIT) != 0) {
		agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap,
		    &gatt->ag_dmaseg);
		free(gatt, M_AGP);
		return (NULL);
	}

	agp_flush_cache();

	return (gatt);
}

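/*
 * Release a GATT allocated with agp_alloc_gatt().
 */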
void
agp_free_gatt(bus_dma_tag_t dmat, struct agp_gatt *gatt)
{
	bus_dmamem_unmap(dmat, (caddr_t)gatt->ag_virtual, gatt->ag_size);
	agp_free_dmamem(dmat, gatt->ag_size, gatt->ag_dmamap, &gatt->ag_dmaseg);
	free(gatt, M_AGP);
}

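/*
 * Default enable method: negotiate a mode supported by the host bridge
 * (target), the video card (master) and the caller, then write it to the
 * AGP_COMMAND register of both devices.
 */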
int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	struct pci_attach_args	pa;
	pcireg_t		tstatus, mstatus, command;
	int			rq, sba, fw, rate, capoff;

	if (pci_find_device(&pa, agpvga_match) == 0 ||
	    pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("agp_generic_enable: not an AGP capable device\n");
		return (-1);
	}

	tstatus = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_STATUS);
	/* display card's agp mode */
	mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);

	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
	    sc->sc_capoff + AGP_COMMAND, command);
	pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);
	return (0);
}

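/*
 * Default alloc_memory method: create a DMA map covering 'size' bytes and
 * track it on the softc's memory list.  The backing pages themselves are
 * not allocated until the block is bound.
 */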
struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
	struct agp_memory *mem;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n", type);
		return (NULL);
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK | M_ZERO);

	if (bus_dmamap_create(sc->sc_dmat, size, size / PAGE_SIZE + 1,
	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
		free(mem, M_AGP);
		return (NULL);
	}

	mem->am_id = sc->sc_nextid++;
	mem->am_size = size;
	TAILQ_INSERT_TAIL(&sc->sc_memory, mem, am_link);
	sc->sc_allocated += size;

	return (mem);
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	if (mem->am_is_bound)
		return (EBUSY);

	sc->sc_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->sc_memory, mem, am_link);
	bus_dmamap_destroy(sc->sc_dmat, mem->am_dmamap);
	free(mem, M_AGP);
	return (0);
}

int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
    bus_size_t offset)
{
	bus_dma_segment_t	*segs, *seg;
	bus_addr_t		 apaddr = sc->sc_apaddr + offset;
	bus_size_t		 done, i, j;
	int			 nseg, error;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound) {
		printf("AGP: memory already bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > sc->sc_apsize) {
		printf("AGP: binding memory at bad offset %#lx\n",
		    (unsigned long)offset);
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * The memory here needs to be directly accessible from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 */

	nseg = (mem->am_size + PAGE_SIZE - 1) / PAGE_SIZE;
	segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, mem->am_size, PAGE_SIZE, 0,
	    segs, nseg, &mem->am_nseg, BUS_DMA_ZERO | BUS_DMA_WAITOK)) != 0) {
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamem_alloc failed %d\n", error);
		return (error);
	}
	if ((error = bus_dmamap_load_raw(sc->sc_dmat, mem->am_dmamap, segs,
	    mem->am_nseg, mem->am_size, BUS_DMA_WAITOK)) != 0) {
		bus_dmamem_free(sc->sc_dmat, segs, mem->am_nseg);
		free(segs, M_AGP);
		rw_exit_write(&sc->sc_lock);
		AGP_DPF("bus_dmamap_load_raw failed %d\n", error);
		return (error);
	}
	mem->am_dmaseg = segs;

	/*
	 * Install entries in the GATT, making sure that if
	 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
	 * aligned to PAGE_SIZE, we don't modify too many GATT
	 * entries. Flush the chipset TLB when done.
	 */
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		    j += AGP_PAGE_SIZE) {
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)seg->ds_addr + j);
			sc->sc_methods->bind_page(sc->sc_chipc,
			    apaddr + done + j, seg->ds_addr + j, 0);
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the CPU cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	bus_addr_t	apaddr = sc->sc_apaddr + mem->am_offset;
	bus_size_t	i;

	rw_enter_write(&sc->sc_lock);

	if (mem->am_is_bound == 0) {
		printf("AGP: memory is not bound\n");
		rw_exit_write(&sc->sc_lock);
		return (EINVAL);
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		sc->sc_methods->unbind_page(sc->sc_chipc, apaddr + i);

	agp_flush_cache();
	sc->sc_methods->flush_tlb(sc->sc_chipc);

	bus_dmamap_unload(sc->sc_dmat, mem->am_dmamap);
	bus_dmamem_free(sc->sc_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	rw_exit_write(&sc->sc_lock);

	return (0);
}

/*
 * Allocates a single-segment block of zeroed, wired dma memory.
 */
int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t *mapp,
    bus_addr_t *baddr, bus_dma_segment_t *seg)
{
	int error, level = 0, nseg;

	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, 1, &nseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_create(tag, size, nseg, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_load_raw(tag, *mapp, seg, nseg, size,
	    BUS_DMA_NOWAIT)) != 0)
		goto out;

	*baddr = (*mapp)->dm_segs[0].ds_addr;

	return (0);
out:
	switch (level) {
	case 2:
		bus_dmamap_destroy(tag, *mapp);
		/* FALLTHROUGH */
	case 1:
		bus_dmamem_free(tag, seg, nseg);
		break;
	default:
		break;
	}

	return (error);
}

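/*
 * Release memory allocated with agp_alloc_dmamem().
 */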
void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    bus_dma_segment_t *seg)
{
	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_free(tag, seg, 1);
}

/* Helper functions used in both user and kernel APIs */

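/*
 * Acquire/release track who currently owns the aperture: the kernel
 * (e.g. a DRM driver) or a userland client.  Only one owner is allowed
 * at a time, and releasing unbinds any memory still bound.
 */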
int
agp_acquire_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_chipc == NULL)
		return (EINVAL);

	if (sc->sc_state != AGP_ACQUIRE_FREE)
		return (EBUSY);
	sc->sc_state = state;

	return (0);
}

int
agp_release_helper(void *dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = (struct agp_softc *)dev;
	struct agp_memory *mem;

	if (sc->sc_state == AGP_ACQUIRE_FREE)
		return (0);

	if (sc->sc_state != state)
		return (EBUSY);

	/*
	 * Clear out the aperture by unbinding any
	 * outstanding memory blocks.
	 */
	TAILQ_FOREACH(mem, &sc->sc_memory, am_link) {
		if (mem->am_is_bound) {
			printf("agp_release_helper: mem %d is bound\n",
			    mem->am_id);
			agp_unbind_memory(sc, mem);
		}
	}
	sc->sc_state = AGP_ACQUIRE_FREE;
	return (0);
}

/* Implementation of the userland ioctl API */

int
agp_info_user(void *dev, agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (!sc->sc_chipc)
		return (ENXIO);

	bzero(info, sizeof *info);
	info->bridge_id = sc->sc_id;
	if (sc->sc_capoff != 0)
		info->agp_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->agp_mode = 0; /* i810 doesn't have real AGP */
	info->aper_base = sc->sc_apaddr;
	info->aper_size = sc->sc_apsize >> 20;
	info->pg_total =
	info->pg_system = sc->sc_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->sc_allocated >> AGP_PAGE_SHIFT;

	return (0);
}

int
agp_setup_user(void *dev, agp_setup *setup)
{
	struct agp_softc	*sc = dev;

	return (agp_enable(sc, setup->agp_mode));
}

int
agp_allocate_user(void *dev, agp_allocate *alloc)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;
	size_t			 size = alloc->pg_count << AGP_PAGE_SHIFT;

	if (sc->sc_allocated + size > sc->sc_maxmem)
		return (EINVAL);

	mem = agp_alloc_memory(sc, alloc->type, size);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return (0);
	} else
		return (ENOMEM);
}

int
agp_deallocate_user(void *dev, int id)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, id)) != NULL) {
		agp_free_memory(sc, mem);
		return (0);
	} else
		return (ENOENT);
}

int
agp_bind_user(void *dev, agp_bind *bind)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, bind->key)) == NULL)
		return (ENOENT);
	return (agp_bind_memory(sc, mem, bind->pg_start << AGP_PAGE_SHIFT));
}

int
agp_unbind_user(void *dev, agp_unbind *unbind)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if ((mem = agp_find_memory(sc, unbind->key)) == NULL)
		return (ENOENT);

	return (agp_unbind_memory(sc, mem));
}

/* Implementation of the kernel API */

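/*
 * A kernel consumer (typically a DRM driver) is expected to agp_acquire()
 * the bus, agp_enable() it at the desired mode, allocate and bind memory
 * with agp_alloc_memory()/agp_bind_memory(), and agp_release() when done.
 */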
void *
agp_find_device(int unit)
{
	if (unit >= agp_cd.cd_ndevs || unit < 0)
		return (NULL);
	return (agp_cd.cd_devs[unit]);
}

enum agp_acquire_state
agp_state(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (sc->sc_state);
}

void
agp_get_info(void *dev, struct agp_info *info)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	if (sc->sc_capoff != 0)
		info->ai_mode = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    AGP_STATUS + sc->sc_capoff);
	else
		info->ai_mode = 0; /* i810 doesn't have real AGP */
	info->ai_aperture_base = sc->sc_apaddr;
	info->ai_aperture_size = sc->sc_apsize;
	info->ai_memory_allowed = sc->sc_maxmem;
	info->ai_memory_used = sc->sc_allocated;
}

int
agp_acquire(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_acquire_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_release(void *dev)
{
	struct agp_softc *sc = (struct agp_softc *)dev;

	return (agp_release_helper(sc, AGP_ACQUIRE_KERNEL));
}

int
agp_enable(void *dev, u_int32_t mode)
{
	struct agp_softc	*sc = dev;
	int			 ret;

	if (sc->sc_methods->enable != NULL) {
		ret = sc->sc_methods->enable(sc->sc_chipc, mode);
	} else {
		ret = agp_generic_enable(sc, mode);
	}
	return (ret);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem;

	if (sc->sc_methods->alloc_memory != NULL) {
		mem = sc->sc_methods->alloc_memory(sc->sc_chipc, type, bytes);
	} else {
		mem = agp_generic_alloc_memory(sc, type, bytes);
	}
	return (mem);
}

void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;

	if (sc->sc_methods->free_memory != NULL) {
		sc->sc_methods->free_memory(sc->sc_chipc, mem);
	} else {
		agp_generic_free_memory(sc, mem);
	}
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->bind_memory != NULL) {
		ret = sc->sc_methods->bind_memory(sc->sc_chipc, mem, offset);
	} else {
		ret = agp_generic_bind_memory(sc, mem, offset);
	}
	return (ret);
}

int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc	*sc = dev;
	struct agp_memory	*mem = handle;
	int			 ret;

	if (sc->sc_methods->unbind_memory != NULL) {
		ret = sc->sc_methods->unbind_memory(sc->sc_chipc, mem);
	} else {
		ret = agp_generic_unbind_memory(sc, mem);
	}
	return (ret);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *)handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
905