/*	$NetBSD: agp.c,v 1.33 2004/08/30 15:05:19 drochner Exp $	*/

/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.33 2004/08/30 15:05:19 drochner Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/agpio.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>
#include <dev/pci/pcidevs.h>

#include <machine/bus.h>

MALLOC_DEFINE(M_AGP, "AGP", "AGP memory");

/* Helper functions for implementing chipset mini drivers. */
/* XXXfvdl get rid of this one. */

extern struct cfdriver agp_cd;

dev_type_open(agpopen);
dev_type_close(agpclose);
dev_type_ioctl(agpioctl);
dev_type_mmap(agpmmap);

const struct cdevsw agp_cdevsw = {
        agpopen, agpclose, noread, nowrite, agpioctl,
        nostop, notty, nopoll, agpmmap, nokqfilter,
};

int agpmatch(struct device *, struct cfdata *, void *);
void agpattach(struct device *, struct device *, void *);

CFATTACH_DECL(agp, sizeof(struct agp_softc),
    agpmatch, agpattach, NULL, NULL);

static int agp_info_user(struct agp_softc *, agp_info *);
static int agp_setup_user(struct agp_softc *, agp_setup *);
static int agp_allocate_user(struct agp_softc *, agp_allocate *);
static int agp_deallocate_user(struct agp_softc *, int);
static int agp_bind_user(struct agp_softc *, agp_bind *);
static int agp_unbind_user(struct agp_softc *, agp_unbind *);
static int agpdev_match(struct pci_attach_args *);

#include "agp_ali.h"
#include "agp_amd.h"
#include "agp_i810.h"
#include "agp_intel.h"
#include "agp_sis.h"
#include "agp_via.h"

const struct agp_product {
        uint32_t        ap_vendor;
        uint32_t        ap_product;
        int             (*ap_match)(const struct pci_attach_args *);
        int             (*ap_attach)(struct device *, struct device *, void *);
} agp_products[] = {
#if NAGP_ALI > 0
        { PCI_VENDOR_ALI, -1,
          NULL, agp_ali_attach },
#endif

#if NAGP_AMD > 0
        { PCI_VENDOR_AMD, -1,
          agp_amd_match, agp_amd_attach },
#endif

#if NAGP_I810 > 0
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_DC100_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_DRAM,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_MCH,
          NULL, agp_i810_attach },
        { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865_HB,
          NULL, agp_i810_attach },
#endif

#if NAGP_INTEL > 0
        { PCI_VENDOR_INTEL, -1,
          NULL, agp_intel_attach },
#endif

#if NAGP_SIS > 0
        { PCI_VENDOR_SIS, -1,
          NULL, agp_sis_attach },
#endif

#if NAGP_VIA > 0
        { PCI_VENDOR_VIATECH, -1,
          NULL, agp_via_attach },
#endif

        { 0, 0,
          NULL, NULL },
};
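
/*
 * Illustrative sketch only (not part of the driver): a new chipset
 * mini driver would hook into the table above with one entry per
 * supported product, optionally followed by a vendor-wide wildcard
 * entry (ap_product == -1).  Product-specific entries must precede
 * the wildcard for their vendor, because agp_lookup() below scans the
 * vendor's section in order and stops at the first exact or wildcard
 * match.  The names NAGP_FOO, PCI_VENDOR_FOO, PCI_PRODUCT_FOO_HB,
 * agp_foo_match and agp_foo_attach are hypothetical.
 *
 *	#if NAGP_FOO > 0
 *		{ PCI_VENDOR_FOO, PCI_PRODUCT_FOO_HB,
 *		  NULL, agp_foo_attach },
 *		{ PCI_VENDOR_FOO, -1,
 *		  agp_foo_match, agp_foo_attach },
 *	#endif
 */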
static const struct agp_product *
agp_lookup(const struct pci_attach_args *pa)
{
        const struct agp_product *ap;

        /* First find the vendor. */
        for (ap = agp_products; ap->ap_attach != NULL; ap++) {
                if (PCI_VENDOR(pa->pa_id) == ap->ap_vendor)
                        break;
        }

        if (ap->ap_attach == NULL)
                return (NULL);

        /* Now find the product within the vendor's domain. */
        for (; ap->ap_attach != NULL; ap++) {
                if (PCI_VENDOR(pa->pa_id) != ap->ap_vendor) {
                        /* Ran out of this vendor's section of the table. */
                        return (NULL);
                }
                if (ap->ap_product == PCI_PRODUCT(pa->pa_id)) {
                        /* Exact match. */
                        break;
                }
                if (ap->ap_product == (uint32_t) -1) {
                        /* Wildcard match. */
                        break;
                }
        }

        if (ap->ap_attach == NULL)
                return (NULL);

        /* Now let the product-specific driver filter the match. */
        if (ap->ap_match != NULL && (*ap->ap_match)(pa) == 0)
                return (NULL);

        return (ap);
}

int
agpmatch(struct device *parent, struct cfdata *match, void *aux)
{
        struct agpbus_attach_args *apa = aux;
        struct pci_attach_args *pa = &apa->apa_pci_args;

        if (agp_lookup(pa) == NULL)
                return (0);

        return (1);
}

/*
 * Upper bound on AGP memory as a function of installed RAM:
 * { installed RAM (MB), maximum AGP memory (MB) }.
 */
static int agp_max[][2] = {
        {0,     0},
        {32,    4},
        {64,    28},
        {128,   96},
        {256,   204},
        {512,   440},
        {1024,  942},
        {2048,  1920},
        {4096,  3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))

void
agpattach(struct device *parent, struct device *self, void *aux)
{
        struct agpbus_attach_args *apa = aux;
        struct pci_attach_args *pa = &apa->apa_pci_args;
        struct agp_softc *sc = (void *)self;
        const struct agp_product *ap;
        int memsize, i, ret;

        ap = agp_lookup(pa);
        if (ap == NULL) {
                printf("\n");
                panic("agpattach: impossible");
        }

        aprint_naive(": AGP controller\n");

        sc->as_dmat = pa->pa_dmat;
        sc->as_pc = pa->pa_pc;
        sc->as_tag = pa->pa_tag;
        sc->as_id = pa->pa_id;

        /*
         * Work out an upper bound for agp memory allocation. This
         * uses a heuristic table from the Linux driver.
         */
        memsize = ptoa(physmem) >> 20;
        for (i = 0; i < agp_max_size; i++) {
                if (memsize <= agp_max[i][0])
                        break;
        }
        if (i == agp_max_size)
                i = agp_max_size - 1;
        sc->as_maxmem = agp_max[i][1] << 20U;

        /*
         * The lock is used to prevent re-entry to
         * agp_generic_bind_memory() since that function can sleep.
         */
        lockinit(&sc->as_lock, PZERO|PCATCH, "agplk", 0, 0);

        TAILQ_INIT(&sc->as_memory);

        ret = (*ap->ap_attach)(parent, self, pa);
        if (ret == 0)
                aprint_normal(": aperture at 0x%lx, size 0x%lx\n",
                    (unsigned long)sc->as_apaddr,
                    (unsigned long)AGP_GET_APERTURE(sc));
        else
                sc->as_chipc = NULL;
}
int
agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc)
{
        /*
         * Find the aperture. Don't map it (yet), this would
         * eat KVA.
         */
        if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, AGP_APBASE,
            PCI_MAPREG_TYPE_MEM, &sc->as_apaddr, &sc->as_apsize,
            &sc->as_apflags) != 0)
                return ENXIO;

        sc->as_apt = pa->pa_memt;

        return 0;
}

struct agp_gatt *
agp_alloc_gatt(struct agp_softc *sc)
{
        u_int32_t apsize = AGP_GET_APERTURE(sc);
        u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
        struct agp_gatt *gatt;
        int dummyseg;

        gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
        if (!gatt)
                return NULL;
        gatt->ag_entries = entries;

        if (agp_alloc_dmamem(sc->as_dmat, entries * sizeof(u_int32_t),
            0, &gatt->ag_dmamap, (caddr_t *)&gatt->ag_virtual,
            &gatt->ag_physical, &gatt->ag_dmaseg, 1, &dummyseg) != 0)
                return NULL;

        gatt->ag_size = entries * sizeof(u_int32_t);
        memset(gatt->ag_virtual, 0, gatt->ag_size);
        agp_flush_cache();

        return gatt;
}

void
agp_free_gatt(struct agp_softc *sc, struct agp_gatt *gatt)
{
        agp_free_dmamem(sc->as_dmat, gatt->ag_size, gatt->ag_dmamap,
            (caddr_t)gatt->ag_virtual, &gatt->ag_dmaseg, 1);
        free(gatt, M_AGP);
}


int
agp_generic_detach(struct agp_softc *sc)
{
        lockmgr(&sc->as_lock, LK_DRAIN, 0);
        agp_flush_cache();
        return 0;
}

static int
agpdev_match(struct pci_attach_args *pa)
{
        if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
            PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA)
                if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP,
                    NULL, NULL))
                        return 1;

        return 0;
}

int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
        struct pci_attach_args pa;
        pcireg_t tstatus, mstatus;
        pcireg_t command;
        int rq, sba, fw, rate, capoff;

        if (pci_find_device(&pa, agpdev_match) == 0 ||
            pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
            &capoff, NULL) == 0) {
                printf("%s: can't find display\n", sc->as_dev.dv_xname);
                return ENXIO;
        }

        tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + AGP_STATUS);
        mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
            capoff + AGP_STATUS);

        /* Set RQ to the min of mode, tstatus and mstatus */
        rq = AGP_MODE_GET_RQ(mode);
        if (AGP_MODE_GET_RQ(tstatus) < rq)
                rq = AGP_MODE_GET_RQ(tstatus);
        if (AGP_MODE_GET_RQ(mstatus) < rq)
                rq = AGP_MODE_GET_RQ(mstatus);

        /* Set SBA if all three can deal with SBA */
        sba = (AGP_MODE_GET_SBA(tstatus)
            & AGP_MODE_GET_SBA(mstatus)
            & AGP_MODE_GET_SBA(mode));

        /* Similar for FW */
        fw = (AGP_MODE_GET_FW(tstatus)
            & AGP_MODE_GET_FW(mstatus)
            & AGP_MODE_GET_FW(mode));

        /* Figure out the max rate */
        rate = (AGP_MODE_GET_RATE(tstatus)
            & AGP_MODE_GET_RATE(mstatus)
            & AGP_MODE_GET_RATE(mode));
        if (rate & AGP_MODE_RATE_4x)
                rate = AGP_MODE_RATE_4x;
        else if (rate & AGP_MODE_RATE_2x)
                rate = AGP_MODE_RATE_2x;
        else
                rate = AGP_MODE_RATE_1x;

        /* Construct the new mode word and tell the hardware */
        command = AGP_MODE_SET_RQ(0, rq);
        command = AGP_MODE_SET_SBA(command, sba);
        command = AGP_MODE_SET_FW(command, fw);
        command = AGP_MODE_SET_RATE(command, rate);
        command = AGP_MODE_SET_AGP(command, 1);
        pci_conf_write(sc->as_pc, sc->as_tag,
            sc->as_capoff + AGP_COMMAND, command);
        pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);

        return 0;
}
struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
        struct agp_memory *mem;

        if ((size & (AGP_PAGE_SIZE - 1)) != 0)
                return 0;

        if (sc->as_allocated + size > sc->as_maxmem)
                return 0;

        if (type != 0) {
                printf("agp_generic_alloc_memory: unsupported type %d\n",
                    type);
                return 0;
        }

        mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
        if (mem == NULL)
                return NULL;

        if (bus_dmamap_create(sc->as_dmat, size, size / PAGE_SIZE + 1,
            size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
                free(mem, M_AGP);
                return NULL;
        }

        mem->am_id = sc->as_nextid++;
        mem->am_size = size;
        mem->am_type = 0;
        mem->am_physical = 0;
        mem->am_offset = 0;
        mem->am_is_bound = 0;
        TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
        sc->as_allocated += size;

        return mem;
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
        if (mem->am_is_bound)
                return EBUSY;

        sc->as_allocated -= mem->am_size;
        TAILQ_REMOVE(&sc->as_memory, mem, am_link);
        bus_dmamap_destroy(sc->as_dmat, mem->am_dmamap);
        free(mem, M_AGP);
        return 0;
}
int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
    off_t offset)
{
        off_t i, k;
        bus_size_t done, j;
        int error;
        bus_dma_segment_t *segs, *seg;
        bus_addr_t pa;
        int contigpages, nseg;

        lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0);

        if (mem->am_is_bound) {
                printf("%s: memory already bound\n", sc->as_dev.dv_xname);
                lockmgr(&sc->as_lock, LK_RELEASE, 0);
                return EINVAL;
        }

        if (offset < 0
            || (offset & (AGP_PAGE_SIZE - 1)) != 0
            || offset + mem->am_size > AGP_GET_APERTURE(sc)) {
                printf("%s: binding memory at bad offset %#lx\n",
                    sc->as_dev.dv_xname, (unsigned long) offset);
                lockmgr(&sc->as_lock, LK_RELEASE, 0);
                return EINVAL;
        }

        /*
         * XXXfvdl
         * The memory here needs to be directly accessible from the
         * AGP video card, so it should be allocated using bus_dma.
         * However, it need not be contiguous, since individual pages
         * are translated using the GATT.
         *
         * Using a large chunk of contiguous memory may get in the way
         * of other subsystems that may need one, so we try to be friendly
         * and ask for allocation in chunks of a minimum of 8 pages
         * of contiguous memory on average, falling back to 4, 2 and 1
         * if really needed. Larger chunks are preferred, since allocating
         * a bus_dma_segment per page would be overkill.
         */

        for (contigpages = 8; contigpages > 0; contigpages >>= 1) {
                nseg = (mem->am_size / (contigpages * PAGE_SIZE)) + 1;
                segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
                if (segs == NULL) {
                        lockmgr(&sc->as_lock, LK_RELEASE, 0);
                        return ENOMEM;
                }
                if (bus_dmamem_alloc(sc->as_dmat, mem->am_size, PAGE_SIZE, 0,
                    segs, nseg, &mem->am_nseg,
                    contigpages > 1 ?
                    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) != 0) {
                        free(segs, M_AGP);
                        continue;
                }
                if (bus_dmamem_map(sc->as_dmat, segs, mem->am_nseg,
                    mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK) != 0) {
                        bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
                        free(segs, M_AGP);
                        continue;
                }
                if (bus_dmamap_load(sc->as_dmat, mem->am_dmamap,
                    mem->am_virtual, mem->am_size, NULL,
                    BUS_DMA_WAITOK) != 0) {
                        bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
                            mem->am_size);
                        bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
                        free(segs, M_AGP);
                        continue;
                }
                mem->am_dmaseg = segs;
                break;
        }

        if (contigpages == 0) {
                lockmgr(&sc->as_lock, LK_RELEASE, 0);
                return ENOMEM;
        }


        /*
         * Bind the individual pages and flush the chipset's
         * TLB.
         */
        done = 0;
        for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
                seg = &mem->am_dmamap->dm_segs[i];
                /*
                 * Install entries in the GATT, making sure that if
                 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
                 * aligned to PAGE_SIZE, we don't modify too many GATT
                 * entries.
                 */
                for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
                    j += AGP_PAGE_SIZE) {
                        pa = seg->ds_addr + j;
                        AGP_DPF("binding offset %#lx to pa %#lx\n",
                            (unsigned long)(offset + done + j),
                            (unsigned long)pa);
                        error = AGP_BIND_PAGE(sc, offset + done + j, pa);
                        if (error) {
                                /*
                                 * Bail out. Reverse all the mappings
                                 * and unwire the pages.
                                 */
                                for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
                                        AGP_UNBIND_PAGE(sc, offset + k);

                                bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
                                bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
                                    mem->am_size);
                                bus_dmamem_free(sc->as_dmat, mem->am_dmaseg,
                                    mem->am_nseg);
                                free(mem->am_dmaseg, M_AGP);
                                lockmgr(&sc->as_lock, LK_RELEASE, 0);
                                return error;
                        }
                }
                done += seg->ds_len;
        }

        /*
         * Flush the CPU cache since we are providing a new mapping
         * for these pages.
         */
        agp_flush_cache();

        /*
         * Make sure the chipset gets the new mappings.
         */
        AGP_FLUSH_TLB(sc);

        mem->am_offset = offset;
        mem->am_is_bound = 1;

        lockmgr(&sc->as_lock, LK_RELEASE, 0);

        return 0;
}
int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
        int i;

        lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0);

        if (!mem->am_is_bound) {
                printf("%s: memory is not bound\n", sc->as_dev.dv_xname);
                lockmgr(&sc->as_lock, LK_RELEASE, 0);
                return EINVAL;
        }


        /*
         * Unbind the individual pages and flush the chipset's
         * TLB. Unwire the pages so they can be swapped.
         */
        for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
                AGP_UNBIND_PAGE(sc, mem->am_offset + i);

        agp_flush_cache();
        AGP_FLUSH_TLB(sc);

        bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
        bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, mem->am_size);
        bus_dmamem_free(sc->as_dmat, mem->am_dmaseg, mem->am_nseg);

        free(mem->am_dmaseg, M_AGP);

        mem->am_offset = 0;
        mem->am_is_bound = 0;

        lockmgr(&sc->as_lock, LK_RELEASE, 0);

        return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(struct agp_softc *sc, enum agp_acquire_state state)
{
        if (sc->as_state != AGP_ACQUIRE_FREE)
                return EBUSY;
        sc->as_state = state;

        return 0;
}

static int
agp_release_helper(struct agp_softc *sc, enum agp_acquire_state state)
{
        struct agp_memory *mem;

        if (sc->as_state == AGP_ACQUIRE_FREE)
                return 0;

        if (sc->as_state != state)
                return EBUSY;

        /*
         * Clear out outstanding aperture mappings.
         * (should not be necessary, done by caller)
         */
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                if (mem->am_is_bound) {
                        printf("agp_release_helper: mem %d is bound\n",
                            mem->am_id);
                        AGP_UNBIND_MEMORY(sc, mem);
                }
        }

        sc->as_state = AGP_ACQUIRE_FREE;
        return 0;
}

static struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
        struct agp_memory *mem;

        AGP_DPF("searching for memory block %d\n", id);
        TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
                AGP_DPF("considering memory block %d\n", mem->am_id);
                if (mem->am_id == id)
                        return mem;
        }
        return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(struct agp_softc *sc, agp_info *info)
{
        memset(info, 0, sizeof *info);
        info->bridge_id = sc->as_id;
        if (sc->as_capoff != 0)
                info->agp_mode = pci_conf_read(sc->as_pc, sc->as_tag,
                    sc->as_capoff + AGP_STATUS);
        else
                info->agp_mode = 0; /* i810 doesn't have real AGP */
        info->aper_base = sc->as_apaddr;
        info->aper_size = AGP_GET_APERTURE(sc) >> 20;
        info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
        info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

        return 0;
}

static int
agp_setup_user(struct agp_softc *sc, agp_setup *setup)
{
        return AGP_ENABLE(sc, setup->agp_mode);
}

static int
agp_allocate_user(struct agp_softc *sc, agp_allocate *alloc)
{
        struct agp_memory *mem;

        mem = AGP_ALLOC_MEMORY(sc,
            alloc->type,
            alloc->pg_count << AGP_PAGE_SHIFT);
        if (mem) {
                alloc->key = mem->am_id;
                alloc->physical = mem->am_physical;
                return 0;
        } else {
                return ENOMEM;
        }
}

static int
agp_deallocate_user(struct agp_softc *sc, int id)
{
        struct agp_memory *mem = agp_find_memory(sc, id);

        if (mem) {
                AGP_FREE_MEMORY(sc, mem);
                return 0;
        } else {
                return ENOENT;
        }
}

static int
agp_bind_user(struct agp_softc *sc, agp_bind *bind)
{
        struct agp_memory *mem = agp_find_memory(sc, bind->key);

        if (!mem)
                return ENOENT;

        return AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(struct agp_softc *sc, agp_unbind *unbind)
{
        struct agp_memory *mem = agp_find_memory(sc, unbind->key);

        if (!mem)
                return ENOENT;

        return AGP_UNBIND_MEMORY(sc, mem);
}
int
agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
        struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

        if (sc == NULL)
                return ENXIO;

        if (sc->as_chipc == NULL)
                return ENXIO;

        if (!sc->as_isopen)
                sc->as_isopen = 1;
        else
                return EBUSY;

        return 0;
}

int
agpclose(dev_t dev, int fflag, int devtype, struct proc *p)
{
        struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));
        struct agp_memory *mem;

        /*
         * Clear the GATT and force release on last close
         */
        if (sc->as_state == AGP_ACQUIRE_USER) {
                while ((mem = TAILQ_FIRST(&sc->as_memory))) {
                        if (mem->am_is_bound) {
                                printf("agpclose: mem %d is bound\n",
                                    mem->am_id);
                                AGP_UNBIND_MEMORY(sc, mem);
                        }
                        /*
                         * XXX it is not documented, but if the protocol allows
                         * allocate->acquire->bind, it would be possible that
                         * memory ranges are allocated by the kernel here,
                         * which we shouldn't free. We'd have to keep track of
                         * the memory range's owner.
                         * The kernel API is unused yet, so we get away with
                         * freeing all.
                         */
                        AGP_FREE_MEMORY(sc, mem);
                }
                agp_release_helper(sc, AGP_ACQUIRE_USER);
        }
        sc->as_isopen = 0;

        return 0;
}

int
agpioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
        struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

        if (sc == NULL)
                return ENODEV;

        if ((fflag & FWRITE) == 0 && cmd != AGPIOC_INFO)
                return EPERM;

        switch (cmd) {
        case AGPIOC_INFO:
                return agp_info_user(sc, (agp_info *) data);

        case AGPIOC_ACQUIRE:
                return agp_acquire_helper(sc, AGP_ACQUIRE_USER);

        case AGPIOC_RELEASE:
                return agp_release_helper(sc, AGP_ACQUIRE_USER);

        case AGPIOC_SETUP:
                return agp_setup_user(sc, (agp_setup *)data);

        case AGPIOC_ALLOCATE:
                return agp_allocate_user(sc, (agp_allocate *)data);

        case AGPIOC_DEALLOCATE:
                return agp_deallocate_user(sc, *(int *) data);

        case AGPIOC_BIND:
                return agp_bind_user(sc, (agp_bind *)data);

        case AGPIOC_UNBIND:
                return agp_unbind_user(sc, (agp_unbind *)data);

        }

        return EINVAL;
}

paddr_t
agpmmap(dev_t dev, off_t offset, int prot)
{
        struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

        if (offset > AGP_GET_APERTURE(sc))
                return -1;

        return (bus_space_mmap(sc->as_apt, sc->as_apaddr, offset, prot,
            BUS_SPACE_MAP_LINEAR));
}
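
/*
 * Example only (not compiled into the kernel): a minimal userland
 * consumer of the ioctl interface above, roughly the sequence an X
 * server performs.  The device node name ("/dev/agp0"), the page
 * counts and the binding offset are illustrative, and error checking
 * is omitted.  The structure fields are the ones read and written by
 * agp_info_user(), agp_setup_user(), agp_allocate_user() and
 * agp_bind_user() above.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/agpio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/agp0", O_RDWR);
 *	agp_info info;
 *	agp_setup setup;
 *	agp_allocate alloc;
 *	agp_bind bind;
 *
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	setup.agp_mode = info.agp_mode;
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	alloc.type = 0;
 *	alloc.pg_count = 16;
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	bind.key = alloc.key;
 *	bind.pg_start = 0;
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */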
/* Implementation of the kernel api */

void *
agp_find_device(int unit)
{
        return device_lookup(&agp_cd, unit);
}

enum agp_acquire_state
agp_state(void *devcookie)
{
        struct agp_softc *sc = devcookie;
        return sc->as_state;
}

void
agp_get_info(void *devcookie, struct agp_info *info)
{
        struct agp_softc *sc = devcookie;

        info->ai_mode = pci_conf_read(sc->as_pc, sc->as_tag,
            sc->as_capoff + AGP_STATUS);
        info->ai_aperture_base = sc->as_apaddr;
        info->ai_aperture_size = sc->as_apsize;	/* XXXfvdl inconsistent */
        info->ai_memory_allowed = sc->as_maxmem;
        info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(void *dev)
{
        return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(void *dev)
{
        return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(void *dev, u_int32_t mode)
{
        struct agp_softc *sc = dev;

        return AGP_ENABLE(sc, mode);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
        struct agp_softc *sc = dev;

        return (void *)AGP_ALLOC_MEMORY(sc, type, bytes);
}

void
agp_free_memory(void *dev, void *handle)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = (struct agp_memory *)handle;

        AGP_FREE_MEMORY(sc, mem);
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = (struct agp_memory *)handle;

        return AGP_BIND_MEMORY(sc, mem, offset);
}

int
agp_unbind_memory(void *dev, void *handle)
{
        struct agp_softc *sc = dev;
        struct agp_memory *mem = (struct agp_memory *)handle;

        return AGP_UNBIND_MEMORY(sc, mem);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
        struct agp_memory *mem = (struct agp_memory *)handle;

        mi->ami_size = mem->am_size;
        mi->ami_physical = mem->am_physical;
        mi->ami_offset = mem->am_offset;
        mi->ami_is_bound = mem->am_is_bound;
}

int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
    bus_dmamap_t *mapp, caddr_t *vaddr, bus_addr_t *baddr,
    bus_dma_segment_t *seg, int nseg, int *rseg)
{
        int error, level = 0;

        if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
            seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr,
            BUS_DMA_NOWAIT | flags)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamap_create(tag, size, *rseg, size, 0,
            BUS_DMA_NOWAIT, mapp)) != 0)
                goto out;
        level++;

        if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
            BUS_DMA_NOWAIT)) != 0)
                goto out;

        *baddr = (*mapp)->dm_segs[0].ds_addr;

        return 0;
out:
        switch (level) {
        case 3:
                bus_dmamap_destroy(tag, *mapp);
                /* FALLTHROUGH */
        case 2:
                bus_dmamem_unmap(tag, *vaddr, size);
                /* FALLTHROUGH */
        case 1:
                bus_dmamem_free(tag, seg, *rseg);
                break;
        default:
                break;
        }

        return error;
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
    caddr_t vaddr, bus_dma_segment_t *seg, int nseg)
{

        bus_dmamap_unload(tag, map);
        bus_dmamap_destroy(tag, map);
        bus_dmamem_unmap(tag, vaddr, size);
        bus_dmamem_free(tag, seg, nseg);
}
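
/*
 * Example only (not compiled): how an in-kernel consumer, e.g. a DRM
 * driver, might use the kernel api above.  As noted in agpclose(), no
 * in-tree code uses this interface yet, so the sequence below is just
 * a sketch; the unit number, size and aperture offset are illustrative.
 *
 *	void *agp = agp_find_device(0);
 *	struct agp_info ai;
 *	struct agp_memory_info mi;
 *	void *handle;
 *
 *	if (agp != NULL && agp_acquire(agp) == 0) {
 *		agp_get_info(agp, &ai);
 *		agp_enable(agp, ai.ai_mode);
 *		handle = agp_alloc_memory(agp, 0, 64 * AGP_PAGE_SIZE);
 *		if (handle != NULL &&
 *		    agp_bind_memory(agp, handle, 0) == 0) {
 *			agp_memory_info(agp, handle, &mi);
 *			... use mi.ami_offset within the aperture ...
 *			agp_unbind_memory(agp, handle);
 *		}
 *		if (handle != NULL)
 *			agp_free_memory(agp, handle);
 *		agp_release(agp);
 *	}
 */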