/*	$NetBSD: agp.c,v 1.13 2002/01/14 01:38:25 augustss Exp $	*/

/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.13 2002/01/14 01:38:25 augustss Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/agpio.h>
#include <sys/proc.h>

#include <uvm/uvm_extern.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>
#include <dev/pci/agpreg.h>
#include <dev/pci/pcidevs.h>

#include <machine/bus.h>

/* Helper functions for implementing chipset mini drivers. */
/* XXXfvdl get rid of this one. */

extern struct cfdriver agp_cd;
cdev_decl(agp);

int	agpmatch(struct device *, struct cfdata *, void *);
void	agpattach(struct device *, struct device *, void *);

struct cfattach agp_ca = {
	sizeof(struct agp_softc), agpmatch, agpattach
};

static int agp_info_user(struct agp_softc *, agp_info *);
static int agp_setup_user(struct agp_softc *, agp_setup *);
static int agp_allocate_user(struct agp_softc *, agp_allocate *);
static int agp_deallocate_user(struct agp_softc *, int);
static int agp_bind_user(struct agp_softc *, agp_bind *);
static int agp_unbind_user(struct agp_softc *, agp_unbind *);
static int agpdev_match(struct pci_attach_args *);

#include "agp_ali.h"
#include "agp_amd.h"
#include "agp_i810.h"
#include "agp_intel.h"
#include "agp_sis.h"
#include "agp_via.h"

const struct agp_product {
	uint32_t	ap_vendor;
	uint32_t	ap_product;
	int		(*ap_match)(const struct pci_attach_args *);
	int		(*ap_attach)(struct device *, struct device *, void *);
} agp_products[] = {
#if NAGP_ALI > 0
	{ PCI_VENDOR_ALI, -1,
	  NULL, agp_ali_attach },
#endif

#if NAGP_AMD > 0
	{ PCI_VENDOR_AMD, -1,
	  agp_amd_match, agp_amd_attach },
#endif

#if NAGP_I810 > 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH,
	  NULL, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_DC100_MCH,
	  NULL, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH,
	  NULL, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB,
	  NULL, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB,
	  NULL, agp_i810_attach },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1,
	  NULL, agp_i810_attach },
#endif

#if NAGP_INTEL > 0
	{ PCI_VENDOR_INTEL, -1,
	  NULL, agp_intel_attach },
#endif

#if NAGP_SIS > 0
	{ PCI_VENDOR_SIS, -1,
	  NULL, agp_sis_attach },
#endif

#if NAGP_VIA > 0
	{ PCI_VENDOR_VIATECH, -1,
	  NULL, agp_via_attach },
#endif

	{ 0, 0,
	  NULL, NULL },
};
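
/*
 * Match a bridge against the product table above.  agp_lookup() scans
 * the table in two passes: first to find the start of the vendor's
 * section, then to find either an exact product match or a vendor-wide
 * wildcard entry (ap_product == -1).  Entries for a vendor must be
 * grouped together, with any wildcard entry placed last in that
 * vendor's section so it does not shadow the specific products.
 */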

static const struct agp_product *
agp_lookup(const struct pci_attach_args *pa)
{
	const struct agp_product *ap;

	/* First find the vendor. */
	for (ap = agp_products; ap->ap_attach != NULL; ap++) {
		if (PCI_VENDOR(pa->pa_id) == ap->ap_vendor)
			break;
	}

	if (ap->ap_attach == NULL)
		return (NULL);

	/* Now find the product within the vendor's domain. */
	for (; ap->ap_attach != NULL; ap++) {
		if (PCI_VENDOR(pa->pa_id) != ap->ap_vendor) {
			/* Ran out of this vendor's section of the table. */
			return (NULL);
		}
		if (ap->ap_product == PCI_PRODUCT(pa->pa_id)) {
			/* Exact match. */
			break;
		}
		if (ap->ap_product == (uint32_t) -1) {
			/* Wildcard match. */
			break;
		}
	}

	if (ap->ap_attach == NULL)
		return (NULL);

	/* Now let the product-specific driver filter the match. */
	if (ap->ap_match != NULL && (*ap->ap_match)(pa) == 0)
		return (NULL);

	return (ap);
}

int
agpmatch(struct device *parent, struct cfdata *match, void *aux)
{
	struct agpbus_attach_args *apa = aux;
	struct pci_attach_args *pa = &apa->apa_pci_args;

	if (strcmp(apa->apa_busname, "agp") != 0)
		return (0);

	if (agp_lookup(pa) == NULL)
		return (0);

	return (1);
}

static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
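
/*
 * agp_max[][] maps total system memory (first column, in MB) to an
 * upper bound on AGP memory allocation (second column, in MB); it is
 * consulted by agpattach() below.  For example, a machine with 200MB
 * of RAM falls into the 256MB row and is capped at 204MB of AGP
 * memory, while anything beyond 4096MB is clamped to the last row.
 */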

void
agpattach(struct device *parent, struct device *self, void *aux)
{
	struct agpbus_attach_args *apa = aux;
	struct pci_attach_args *pa = &apa->apa_pci_args;
	struct agp_softc *sc = (void *)self;
	const struct agp_product *ap;
	int memsize, i, ret;

	ap = agp_lookup(pa);
	if (ap == NULL) {
		printf("\n");
		panic("agpattach: impossible");
	}

	sc->as_dmat = pa->pa_dmat;
	sc->as_pc = pa->pa_pc;
	sc->as_tag = pa->pa_tag;
	sc->as_id = pa->pa_id;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(physmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	lockinit(&sc->as_lock, PZERO|PCATCH, "agplk", 0, 0);

	TAILQ_INIT(&sc->as_memory);

	ret = (*ap->ap_attach)(parent, self, pa);
	if (ret == 0)
		printf(": aperture at 0x%lx, size 0x%lx\n",
		    (unsigned long)sc->as_apaddr,
		    (unsigned long)AGP_GET_APERTURE(sc));
	else
		sc->as_chipc = NULL;
}
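
/*
 * Record where the AGP aperture lives, using the AGP_APBASE BAR.  The
 * chipset mini-drivers call this from their attach routines; the
 * aperture is deliberately not mapped here (that would eat KVA), only
 * its bus address, size and tag are saved for later use by agpmmap().
 */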
int
agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc)
{
	/*
	 * Find the aperture.  Don't map it (yet); this would eat KVA.
	 */
	if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, AGP_APBASE,
	    PCI_MAPREG_TYPE_MEM, &sc->as_apaddr, &sc->as_apsize,
	    &sc->as_apflags) != 0)
		return ENXIO;

	sc->as_apt = pa->pa_memt;

	return 0;
}

struct agp_gatt *
agp_alloc_gatt(struct agp_softc *sc)
{
	u_int32_t apsize = AGP_GET_APERTURE(sc);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;
	int dummyseg;

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return NULL;
	gatt->ag_entries = entries;

	if (agp_alloc_dmamem(sc->as_dmat, entries * sizeof(u_int32_t),
	    0, &gatt->ag_dmamap, (caddr_t *)&gatt->ag_virtual,
	    &gatt->ag_physical, &gatt->ag_dmaseg, 1, &dummyseg) != 0) {
		free(gatt, M_AGP);
		return NULL;
	}

	gatt->ag_size = entries * sizeof(u_int32_t);
	memset(gatt->ag_virtual, 0, gatt->ag_size);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_softc *sc, struct agp_gatt *gatt)
{
	agp_free_dmamem(sc->as_dmat, gatt->ag_size, gatt->ag_dmamap,
	    (caddr_t)gatt->ag_virtual, &gatt->ag_dmaseg, 1);
	free(gatt, M_AGP);
}

int
agp_generic_detach(struct agp_softc *sc)
{
	lockmgr(&sc->as_lock, LK_DRAIN, 0);
	agp_flush_cache();
	return 0;
}

static int
agpdev_match(struct pci_attach_args *pa)
{
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA)
		return 1;

	return 0;
}
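
/*
 * Negotiate an AGP mode between the bridge (target), the display
 * device (master) and the mode requested by the caller, then write the
 * result to both AGP_COMMAND registers.  RQ is the minimum of the
 * three request depths, SBA and FW are enabled only if all three agree,
 * and the fastest rate common to all three is chosen; for example, a
 * 4x/2x/1x bridge with a 2x/1x master and a request for all rates ends
 * up running at 2x.
 */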
int
agp_generic_enable(struct agp_softc *sc, u_int32_t mode)
{
	struct pci_attach_args pa;
	pcireg_t tstatus, mstatus;
	pcireg_t command;
	int rq, sba, fw, rate, capoff;

	if (pci_find_device(&pa, agpdev_match) == 0 ||
	    pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP,
	    &capoff, NULL) == 0) {
		printf("%s: can't find display\n", sc->as_dev.dv_xname);
		return ENXIO;
	}

	tstatus = pci_conf_read(sc->as_pc, sc->as_tag,
	    sc->as_capoff + AGP_STATUS);
	mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag,
	    capoff + AGP_STATUS);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	    & AGP_MODE_GET_SBA(mstatus)
	    & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	    & AGP_MODE_GET_FW(mstatus)
	    & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
	    & AGP_MODE_GET_RATE(mstatus)
	    & AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_RATE_4x)
		rate = AGP_MODE_RATE_4x;
	else if (rate & AGP_MODE_RATE_2x)
		rate = AGP_MODE_RATE_2x;
	else
		rate = AGP_MODE_RATE_1x;

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_conf_write(sc->as_pc, sc->as_tag,
	    sc->as_capoff + AGP_COMMAND, command);
	pci_conf_write(pa.pa_pc, pa.pa_tag, capoff + AGP_COMMAND, command);

	return 0;
}

struct agp_memory *
agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size)
{
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		    type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	if (mem == NULL)
		return NULL;

	if (bus_dmamap_create(sc->as_dmat, size, size / PAGE_SIZE + 1,
	    size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) {
		free(mem, M_AGP);
		return NULL;
	}

	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	bus_dmamap_destroy(sc->as_dmat, mem->am_dmamap);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem,
			off_t offset)
{
	off_t i, k;
	bus_size_t done, j;
	int error;
	bus_dma_segment_t *segs, *seg;
	bus_addr_t pa;
	int contigpages, nseg;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0);

	if (mem->am_is_bound) {
		printf("%s: memory already bound\n", sc->as_dev.dv_xname);
		lockmgr(&sc->as_lock, LK_RELEASE, 0);
		return EINVAL;
	}

	if (offset < 0
	    || (offset & (AGP_PAGE_SIZE - 1)) != 0
	    || offset + mem->am_size > AGP_GET_APERTURE(sc)) {
		printf("%s: binding memory at bad offset %#lx\n",
		    sc->as_dev.dv_xname, (unsigned long) offset);
		lockmgr(&sc->as_lock, LK_RELEASE, 0);
		return EINVAL;
	}

	/*
	 * XXXfvdl
	 * The memory here needs to be directly accessible from the
	 * AGP video card, so it should be allocated using bus_dma.
	 * However, it need not be contiguous, since individual pages
	 * are translated using the GATT.
	 *
	 * Using a large chunk of contiguous memory may get in the way
	 * of other subsystems that may need one, so we try to be friendly
	 * and ask for allocation in chunks of a minimum of 8 pages
	 * of contiguous memory on average, falling back to 4, 2 and 1
	 * if really needed. Larger chunks are preferred, since allocating
	 * a bus_dma_segment per page would be overkill.
	 */

	for (contigpages = 8; contigpages > 0; contigpages >>= 1) {
		nseg = (mem->am_size / (contigpages * PAGE_SIZE)) + 1;
		segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK);
		if (segs == NULL) {
			lockmgr(&sc->as_lock, LK_RELEASE, 0);
			return ENOMEM;
		}
		if (bus_dmamem_alloc(sc->as_dmat, mem->am_size, PAGE_SIZE, 0,
		    segs, nseg, &mem->am_nseg, BUS_DMA_WAITOK) != 0) {
			free(segs, M_AGP);
			continue;
		}
		if (bus_dmamem_map(sc->as_dmat, segs, mem->am_nseg,
		    mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK) != 0) {
			bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
			free(segs, M_AGP);
			continue;
		}
		if (bus_dmamap_load(sc->as_dmat, mem->am_dmamap,
		    mem->am_virtual, mem->am_size, NULL,
		    BUS_DMA_WAITOK) != 0) {
			bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
			    mem->am_size);
			bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg);
			free(segs, M_AGP);
			continue;
		}
		mem->am_dmaseg = segs;
		break;
	}

	if (contigpages == 0) {
		lockmgr(&sc->as_lock, LK_RELEASE, 0);
		return ENOMEM;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 */
	done = 0;
	for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) {
		seg = &mem->am_dmamap->dm_segs[i];
		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < seg->ds_len && (done + j) < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			pa = seg->ds_addr + j;
			AGP_DPF("binding offset %#lx to pa %#lx\n",
			    (unsigned long)(offset + done + j),
			    (unsigned long)pa);
			error = AGP_BIND_PAGE(sc, offset + done + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				for (k = 0; k < done + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(sc, offset + k);

				bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
				bus_dmamem_unmap(sc->as_dmat, mem->am_virtual,
				    mem->am_size);
				bus_dmamem_free(sc->as_dmat, mem->am_dmaseg,
				    mem->am_nseg);
				free(mem->am_dmaseg, M_AGP);
				lockmgr(&sc->as_lock, LK_RELEASE, 0);
				return error;
			}
		}
		done += seg->ds_len;
	}

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(sc);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	lockmgr(&sc->as_lock, LK_RELEASE, 0);

	return 0;
}
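
/*
 * Reverse of agp_generic_bind_memory(): clear the GATT entries that
 * cover the block, flush the CPU cache and the chipset TLB, and
 * release the DMA memory backing the block so the pages can be reused.
 */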
int
agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem)
{
	int i;

	lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0);

	if (!mem->am_is_bound) {
		printf("%s: memory is not bound\n", sc->as_dev.dv_xname);
		lockmgr(&sc->as_lock, LK_RELEASE, 0);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(sc, mem->am_offset + i);

	agp_flush_cache();
	AGP_FLUSH_TLB(sc);

	bus_dmamap_unload(sc->as_dmat, mem->am_dmamap);
	bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, mem->am_size);
	bus_dmamem_free(sc->as_dmat, mem->am_dmaseg, mem->am_nseg);

	free(mem->am_dmaseg, M_AGP);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	lockmgr(&sc->as_lock, LK_RELEASE, 0);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(struct agp_softc *sc, enum agp_acquire_state state)
{
	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(struct agp_softc *sc, enum agp_acquire_state state)
{
	struct agp_memory *mem;

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	/*
	 * Clear out the aperture and free any outstanding memory blocks.
	 */
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		if (mem->am_is_bound) {
			printf("agp_release_helper: mem %d is bound\n",
			    mem->am_id);
			AGP_UNBIND_MEMORY(sc, mem);
		}
	}

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(struct agp_softc *sc, int id)
{
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(struct agp_softc *sc, agp_info *info)
{
	memset(info, 0, sizeof *info);
	info->bridge_id = sc->as_id;
	if (sc->as_capoff != 0)
		info->agp_mode = pci_conf_read(sc->as_pc, sc->as_tag,
		    sc->as_capoff + AGP_STATUS);
	else
		info->agp_mode = 0; /* i810 doesn't have real AGP */
	info->aper_base = sc->as_apaddr;
	info->aper_size = AGP_GET_APERTURE(sc) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(struct agp_softc *sc, agp_setup *setup)
{
	return AGP_ENABLE(sc, setup->agp_mode);
}

static int
agp_allocate_user(struct agp_softc *sc, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(sc,
	    alloc->type,
	    alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(struct agp_softc *sc, int id)
{
	struct agp_memory *mem = agp_find_memory(sc, id);

	if (mem) {
		AGP_FREE_MEMORY(sc, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(struct agp_softc *sc, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(sc, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(struct agp_softc *sc, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(sc, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(sc, mem);
}
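
/*
 * Character device entry points.  A userland client drives the ioctl
 * interface above roughly as follows (an illustrative sketch only; the
 * request structures live in <sys/agpio.h> and the device node name
 * depends on local configuration):
 *
 *	int fd = open("/dev/agp0", O_RDWR);	// hypothetical node name
 *	agp_info info;
 *	ioctl(fd, AGPIOC_INFO, &info);		// query the bridge
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);		// become the owner
 *	agp_setup setup = { .agp_mode = info.agp_mode };
 *	ioctl(fd, AGPIOC_SETUP, &setup);	// enable AGP
 *	agp_allocate alloc = { .type = 0, .pg_count = 16 };
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);	// get a memory block
 *	agp_bind bind = { .key = alloc.key, .pg_start = 0 };
 *	ioctl(fd, AGPIOC_BIND, &bind);		// map it into the aperture
 *	...
 *	ioctl(fd, AGPIOC_RELEASE, 0);		// unbinds leftovers too
 *	close(fd);
 */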

int
agpopen(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

	if (sc == NULL)
		return ENXIO;

	if (sc->as_chipc == NULL)
		return ENXIO;

	if (!sc->as_isopen)
		sc->as_isopen = 1;
	else
		return EBUSY;

	return 0;
}

int
agpclose(dev_t dev, int fflag, int devtype, struct proc *p)
{
	struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

	/*
	 * Clear the GATT and force release on last close
	 */
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(sc, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;

	return 0;
}

int
agpioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

	if (sc == NULL)
		return ENODEV;

	if ((fflag & FWRITE) == 0 && cmd != AGPIOC_INFO)
		return EPERM;

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(sc, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(sc, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(sc, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(sc, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(sc, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(sc, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(sc, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(sc, (agp_unbind *)data);
	}

	return EINVAL;
}

paddr_t
agpmmap(dev_t dev, off_t offset, int prot)
{
	struct agp_softc *sc = device_lookup(&agp_cd, AGPUNIT(dev));

	if (offset > AGP_GET_APERTURE(sc))
		return -1;

	return (bus_space_mmap(sc->as_apt, sc->as_apaddr, offset, prot,
	    BUS_SPACE_MAP_LINEAR));
}

/* Implementation of the kernel api */

void *
agp_find_device(int unit)
{
	return device_lookup(&agp_cd, unit);
}

enum agp_acquire_state
agp_state(void *devcookie)
{
	struct agp_softc *sc = devcookie;

	return sc->as_state;
}

void
agp_get_info(void *devcookie, struct agp_info *info)
{
	struct agp_softc *sc = devcookie;

	info->ai_mode = pci_conf_read(sc->as_pc, sc->as_tag,
	    sc->as_capoff + AGP_STATUS);
	info->ai_aperture_base = sc->as_apaddr;
	info->ai_aperture_size = sc->as_apsize;	/* XXXfvdl inconsistent */
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(void *dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(void *dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(void *dev, u_int32_t mode)
{
	struct agp_softc *sc = dev;

	return AGP_ENABLE(sc, mode);
}

void *
agp_alloc_memory(void *dev, int type, vsize_t bytes)
{
	struct agp_softc *sc = dev;

	return (void *)AGP_ALLOC_MEMORY(sc, type, bytes);
}

void
agp_free_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	AGP_FREE_MEMORY(sc, mem);
}

int
agp_bind_memory(void *dev, void *handle, off_t offset)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_BIND_MEMORY(sc, mem, offset);
}

int
agp_unbind_memory(void *dev, void *handle)
{
	struct agp_softc *sc = dev;
	struct agp_memory *mem = (struct agp_memory *) handle;

	return AGP_UNBIND_MEMORY(sc, mem);
}

void
agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
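
/*
 * agp_alloc_dmamem() wraps the usual four-step bus_dma allocation
 * sequence (bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_create,
 * bus_dmamap_load) and hands back the kernel virtual address, the DMA
 * map and the bus address of the new buffer.  On failure, the steps
 * that already succeeded are undone in reverse order; agp_free_dmamem()
 * is its exact counterpart for teardown.
 */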

int
agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags,
		 bus_dmamap_t *mapp, caddr_t *vaddr, bus_addr_t *baddr,
		 bus_dma_segment_t *seg, int nseg, int *rseg)
{
	int error, level = 0;

	if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0,
	    seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr,
	    BUS_DMA_NOWAIT | flags)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_create(tag, size, *rseg, size, 0,
	    BUS_DMA_NOWAIT, mapp)) != 0)
		goto out;
	level++;

	if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL,
	    BUS_DMA_NOWAIT)) != 0)
		goto out;

	*baddr = (*mapp)->dm_segs[0].ds_addr;

	return 0;
out:
	switch (level) {
	case 3:
		bus_dmamap_destroy(tag, *mapp);
		/* FALLTHROUGH */
	case 2:
		bus_dmamem_unmap(tag, *vaddr, size);
		/* FALLTHROUGH */
	case 1:
		bus_dmamem_free(tag, seg, *rseg);
		break;
	default:
		break;
	}

	return error;
}

void
agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map,
		caddr_t vaddr, bus_dma_segment_t *seg, int nseg)
{

	bus_dmamap_unload(tag, map);
	bus_dmamap_destroy(tag, map);
	bus_dmamem_unmap(tag, vaddr, size);
	bus_dmamem_free(tag, seg, nseg);
}