1 /* $NetBSD: agp.c,v 1.77 2011/02/15 08:57:01 jmcneill Exp $ */ 2 3 /*- 4 * Copyright (c) 2000 Doug Rabson 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD: src/sys/pci/agp.c,v 1.12 2001/05/19 01:28:07 alfred Exp $ 29 */ 30 31 /* 32 * Copyright (c) 2001 Wasabi Systems, Inc. 33 * All rights reserved. 34 * 35 * Written by Frank van der Linden for Wasabi Systems, Inc. 36 * 37 * Redistribution and use in source and binary forms, with or without 38 * modification, are permitted provided that the following conditions 39 * are met: 40 * 1. Redistributions of source code must retain the above copyright 41 * notice, this list of conditions and the following disclaimer. 42 * 2. Redistributions in binary form must reproduce the above copyright 43 * notice, this list of conditions and the following disclaimer in the 44 * documentation and/or other materials provided with the distribution. 45 * 3. All advertising materials mentioning features or use of this software 46 * must display the following acknowledgement: 47 * This product includes software developed for the NetBSD Project by 48 * Wasabi Systems, Inc. 49 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 50 * or promote products derived from this software without specific prior 51 * written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 57 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 67 #include <sys/cdefs.h> 68 __KERNEL_RCSID(0, "$NetBSD: agp.c,v 1.77 2011/02/15 08:57:01 jmcneill Exp $"); 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/malloc.h> 73 #include <sys/kernel.h> 74 #include <sys/device.h> 75 #include <sys/conf.h> 76 #include <sys/ioctl.h> 77 #include <sys/fcntl.h> 78 #include <sys/agpio.h> 79 #include <sys/proc.h> 80 #include <sys/mutex.h> 81 82 #include <dev/pci/pcireg.h> 83 #include <dev/pci/pcivar.h> 84 #include <dev/pci/agpvar.h> 85 #include <dev/pci/agpreg.h> 86 #include <dev/pci/pcidevs.h> 87 88 #include <sys/bus.h> 89 90 MALLOC_DEFINE(M_AGP, "AGP", "AGP memory"); 91 92 /* Helper functions for implementing chipset mini drivers. */ 93 /* XXXfvdl get rid of this one. */ 94 95 extern struct cfdriver agp_cd; 96 97 static int agp_info_user(struct agp_softc *, agp_info *); 98 static int agp_setup_user(struct agp_softc *, agp_setup *); 99 static int agp_allocate_user(struct agp_softc *, agp_allocate *); 100 static int agp_deallocate_user(struct agp_softc *, int); 101 static int agp_bind_user(struct agp_softc *, agp_bind *); 102 static int agp_unbind_user(struct agp_softc *, agp_unbind *); 103 static int agp_generic_enable_v2(struct agp_softc *, struct pci_attach_args *, 104 int, u_int32_t); 105 static int agp_generic_enable_v3(struct agp_softc *, struct pci_attach_args *, 106 int, u_int32_t); 107 static int agpdev_match(struct pci_attach_args *); 108 static bool agp_resume(device_t, const pmf_qual_t *); 109 110 #include "agp_ali.h" 111 #include "agp_amd.h" 112 #include "agp_i810.h" 113 #include "agp_intel.h" 114 #include "agp_sis.h" 115 #include "agp_via.h" 116 #include "agp_amd64.h" 117 118 const struct agp_product { 119 uint32_t ap_vendor; 120 uint32_t ap_product; 121 int (*ap_match)(const struct pci_attach_args *); 122 int (*ap_attach)(device_t, device_t, void *); 123 } agp_products[] = { 124 #if NAGP_AMD64 > 0 125 { PCI_VENDOR_ALI, PCI_PRODUCT_ALI_M1689, 126 agp_amd64_match, agp_amd64_attach }, 127 #endif 128 129 #if NAGP_ALI > 0 130 { PCI_VENDOR_ALI, -1, 131 NULL, agp_ali_attach }, 132 #endif 133 134 #if NAGP_AMD64 > 0 135 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_AGP8151_DEV, 136 agp_amd64_match, agp_amd64_attach }, 137 #endif 138 139 #if NAGP_AMD > 0 140 { PCI_VENDOR_AMD, -1, 141 agp_amd_match, agp_amd_attach }, 142 #endif 143 144 #if NAGP_I810 > 0 145 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH, 146 NULL, agp_i810_attach }, 147 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_DC100_MCH, 148 NULL, agp_i810_attach }, 149 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH, 150 NULL, agp_i810_attach }, 151 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB, 152 NULL, agp_i810_attach }, 153 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB, 154 NULL, agp_i810_attach }, 155 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1, 156 NULL, agp_i810_attach }, 157 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82845G_DRAM, 158 NULL, agp_i810_attach }, 159 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82855GM_MCH, 160 NULL, agp_i810_attach }, 161 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82865_HB, 162 NULL, agp_i810_attach }, 163 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915G_HB, 164 NULL, agp_i810_attach }, 165 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82915GM_HB, 166 NULL, agp_i810_attach }, 167 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945P_MCH, 168 NULL, agp_i810_attach }, 169 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GM_HB, 170 NULL, agp_i810_attach }, 171 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82945GME_HB, 172 NULL, agp_i810_attach }, 173 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965Q_HB, 174 NULL, agp_i810_attach }, 175 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965PM_HB, 176 NULL, agp_i810_attach }, 177 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965G_HB, 178 NULL, agp_i810_attach }, 179 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q35_HB, 180 NULL, agp_i810_attach }, 181 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G33_HB, 182 NULL, agp_i810_attach }, 183 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q33_HB, 184 NULL, agp_i810_attach }, 185 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G35_HB, 186 NULL, agp_i810_attach }, 187 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82946GZ_HB, 188 NULL, agp_i810_attach }, 189 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82GM45_HB, 190 NULL, agp_i810_attach }, 191 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82IGD_E_HB, 192 NULL, agp_i810_attach }, 193 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82Q45_HB, 194 NULL, agp_i810_attach }, 195 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G45_HB, 196 NULL, agp_i810_attach }, 197 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82G41_HB, 198 NULL, agp_i810_attach }, 199 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_E7221_HB, 200 NULL, agp_i810_attach }, 201 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82965GME_HB, 202 NULL, agp_i810_attach }, 203 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82B43_HB, 204 NULL, agp_i810_attach }, 205 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_D_HB, 206 NULL, agp_i810_attach }, 207 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_M_HB, 208 NULL, agp_i810_attach }, 209 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_MA_HB, 210 NULL, agp_i810_attach }, 211 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_IRONLAKE_MC2_HB, 212 NULL, agp_i810_attach }, 213 #endif 214 215 #if NAGP_INTEL > 0 216 { PCI_VENDOR_INTEL, -1, 217 NULL, agp_intel_attach }, 218 #endif 219 220 #if NAGP_AMD64 > 0 221 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_PCHB, 222 agp_amd64_match, agp_amd64_attach }, 223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_PCHB, 224 agp_amd64_match, agp_amd64_attach }, 225 #endif 226 227 #if NAGP_AMD64 > 0 228 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_755, 229 agp_amd64_match, agp_amd64_attach }, 230 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_760, 231 agp_amd64_match, agp_amd64_attach }, 232 #endif 233 234 #if NAGP_SIS > 0 235 { PCI_VENDOR_SIS, -1, 236 NULL, agp_sis_attach }, 237 #endif 238 239 #if NAGP_AMD64 > 0 240 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8M800_0, 241 agp_amd64_match, agp_amd64_attach }, 242 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8T890_0, 243 agp_amd64_match, agp_amd64_attach }, 244 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8HTB_0, 245 agp_amd64_match, agp_amd64_attach }, 246 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_K8HTB, 247 agp_amd64_match, agp_amd64_attach }, 248 #endif 249 250 #if NAGP_VIA > 0 251 { PCI_VENDOR_VIATECH, -1, 252 NULL, agp_via_attach }, 253 #endif 254 255 { 0, 0, 256 NULL, NULL }, 257 }; 258 259 static const struct agp_product * 260 agp_lookup(const struct pci_attach_args *pa) 261 { 262 const struct agp_product *ap; 263 264 /* First find the vendor. */ 265 for (ap = agp_products; ap->ap_attach != NULL; ap++) { 266 if (PCI_VENDOR(pa->pa_id) == ap->ap_vendor) 267 break; 268 } 269 270 if (ap->ap_attach == NULL) 271 return (NULL); 272 273 /* Now find the product within the vendor's domain. */ 274 for (; ap->ap_attach != NULL; ap++) { 275 if (PCI_VENDOR(pa->pa_id) != ap->ap_vendor) { 276 /* Ran out of this vendor's section of the table. */ 277 return (NULL); 278 } 279 if (ap->ap_product == PCI_PRODUCT(pa->pa_id)) { 280 /* Exact match. */ 281 break; 282 } 283 if (ap->ap_product == (uint32_t) -1) { 284 /* Wildcard match. */ 285 break; 286 } 287 } 288 289 if (ap->ap_attach == NULL) 290 return (NULL); 291 292 /* Now let the product-specific driver filter the match. */ 293 if (ap->ap_match != NULL && (*ap->ap_match)(pa) == 0) 294 return (NULL); 295 296 return (ap); 297 } 298 299 static int 300 agpmatch(device_t parent, cfdata_t match, void *aux) 301 { 302 struct agpbus_attach_args *apa = aux; 303 struct pci_attach_args *pa = &apa->apa_pci_args; 304 305 if (agp_lookup(pa) == NULL) 306 return (0); 307 308 return (1); 309 } 310 311 static const int agp_max[][2] = { 312 {0, 0}, 313 {32, 4}, 314 {64, 28}, 315 {128, 96}, 316 {256, 204}, 317 {512, 440}, 318 {1024, 942}, 319 {2048, 1920}, 320 {4096, 3932} 321 }; 322 #define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0])) 323 324 static void 325 agpattach(device_t parent, device_t self, void *aux) 326 { 327 struct agpbus_attach_args *apa = aux; 328 struct pci_attach_args *pa = &apa->apa_pci_args; 329 struct agp_softc *sc = device_private(self); 330 const struct agp_product *ap; 331 int memsize, i, ret; 332 333 ap = agp_lookup(pa); 334 KASSERT(ap != NULL); 335 336 aprint_naive(": AGP controller\n"); 337 338 sc->as_dev = self; 339 sc->as_dmat = pa->pa_dmat; 340 sc->as_pc = pa->pa_pc; 341 sc->as_tag = pa->pa_tag; 342 sc->as_id = pa->pa_id; 343 344 /* 345 * Work out an upper bound for agp memory allocation. This 346 * uses a heuristic table from the Linux driver. 347 */ 348 memsize = physmem >> (20 - PAGE_SHIFT); /* memsize is in MB */ 349 for (i = 0; i < agp_max_size; i++) { 350 if (memsize <= agp_max[i][0]) 351 break; 352 } 353 if (i == agp_max_size) 354 i = agp_max_size - 1; 355 sc->as_maxmem = agp_max[i][1] << 20U; 356 357 /* 358 * The mutex is used to prevent re-entry to 359 * agp_generic_bind_memory() since that function can sleep. 360 */ 361 mutex_init(&sc->as_mtx, MUTEX_DEFAULT, IPL_NONE); 362 363 TAILQ_INIT(&sc->as_memory); 364 365 ret = (*ap->ap_attach)(parent, self, pa); 366 if (ret == 0) 367 aprint_normal(": aperture at 0x%lx, size 0x%lx\n", 368 (unsigned long)sc->as_apaddr, 369 (unsigned long)AGP_GET_APERTURE(sc)); 370 else 371 sc->as_chipc = NULL; 372 373 if (!device_pmf_is_registered(self)) { 374 if (!pmf_device_register(self, NULL, agp_resume)) 375 aprint_error_dev(self, "couldn't establish power " 376 "handler\n"); 377 } 378 } 379 380 CFATTACH_DECL_NEW(agp, sizeof(struct agp_softc), 381 agpmatch, agpattach, NULL, NULL); 382 383 int 384 agp_map_aperture(struct pci_attach_args *pa, struct agp_softc *sc, int reg) 385 { 386 /* 387 * Find the aperture. Don't map it (yet), this would 388 * eat KVA. 389 */ 390 if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg, 391 PCI_MAPREG_TYPE_MEM, &sc->as_apaddr, &sc->as_apsize, 392 &sc->as_apflags) != 0) 393 return ENXIO; 394 395 sc->as_apt = pa->pa_memt; 396 397 return 0; 398 } 399 400 struct agp_gatt * 401 agp_alloc_gatt(struct agp_softc *sc) 402 { 403 u_int32_t apsize = AGP_GET_APERTURE(sc); 404 u_int32_t entries = apsize >> AGP_PAGE_SHIFT; 405 struct agp_gatt *gatt; 406 void *virtual; 407 int dummyseg; 408 409 gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT); 410 if (!gatt) 411 return NULL; 412 gatt->ag_entries = entries; 413 414 if (agp_alloc_dmamem(sc->as_dmat, entries * sizeof(u_int32_t), 415 0, &gatt->ag_dmamap, &virtual, &gatt->ag_physical, 416 &gatt->ag_dmaseg, 1, &dummyseg) != 0) { 417 free(gatt, M_AGP); 418 return NULL; 419 } 420 gatt->ag_virtual = (uint32_t *)virtual; 421 422 gatt->ag_size = entries * sizeof(u_int32_t); 423 memset(gatt->ag_virtual, 0, gatt->ag_size); 424 agp_flush_cache(); 425 426 return gatt; 427 } 428 429 void 430 agp_free_gatt(struct agp_softc *sc, struct agp_gatt *gatt) 431 { 432 agp_free_dmamem(sc->as_dmat, gatt->ag_size, gatt->ag_dmamap, 433 (void *)gatt->ag_virtual, &gatt->ag_dmaseg, 1); 434 free(gatt, M_AGP); 435 } 436 437 438 int 439 agp_generic_detach(struct agp_softc *sc) 440 { 441 mutex_destroy(&sc->as_mtx); 442 agp_flush_cache(); 443 return 0; 444 } 445 446 static int 447 agpdev_match(struct pci_attach_args *pa) 448 { 449 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY && 450 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_DISPLAY_VGA) 451 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_AGP, 452 NULL, NULL)) 453 return 1; 454 455 return 0; 456 } 457 458 int 459 agp_generic_enable(struct agp_softc *sc, u_int32_t mode) 460 { 461 struct pci_attach_args pa; 462 pcireg_t tstatus, mstatus; 463 int capoff; 464 465 if (pci_find_device(&pa, agpdev_match) == 0 || 466 pci_get_capability(pa.pa_pc, pa.pa_tag, PCI_CAP_AGP, 467 &capoff, NULL) == 0) { 468 aprint_error_dev(sc->as_dev, "can't find display\n"); 469 return ENXIO; 470 } 471 472 tstatus = pci_conf_read(sc->as_pc, sc->as_tag, 473 sc->as_capoff + AGP_STATUS); 474 mstatus = pci_conf_read(pa.pa_pc, pa.pa_tag, 475 capoff + AGP_STATUS); 476 477 if (AGP_MODE_GET_MODE_3(mode) && 478 AGP_MODE_GET_MODE_3(tstatus) && 479 AGP_MODE_GET_MODE_3(mstatus)) 480 return agp_generic_enable_v3(sc, &pa, capoff, mode); 481 else 482 return agp_generic_enable_v2(sc, &pa, capoff, mode); 483 } 484 485 static int 486 agp_generic_enable_v2(struct agp_softc *sc, struct pci_attach_args *pa, 487 int capoff, u_int32_t mode) 488 { 489 pcireg_t tstatus, mstatus; 490 pcireg_t command; 491 int rq, sba, fw, rate; 492 493 tstatus = pci_conf_read(sc->as_pc, sc->as_tag, 494 sc->as_capoff + AGP_STATUS); 495 mstatus = pci_conf_read(pa->pa_pc, pa->pa_tag, 496 capoff + AGP_STATUS); 497 498 /* Set RQ to the min of mode, tstatus and mstatus */ 499 rq = AGP_MODE_GET_RQ(mode); 500 if (AGP_MODE_GET_RQ(tstatus) < rq) 501 rq = AGP_MODE_GET_RQ(tstatus); 502 if (AGP_MODE_GET_RQ(mstatus) < rq) 503 rq = AGP_MODE_GET_RQ(mstatus); 504 505 /* Set SBA if all three can deal with SBA */ 506 sba = (AGP_MODE_GET_SBA(tstatus) 507 & AGP_MODE_GET_SBA(mstatus) 508 & AGP_MODE_GET_SBA(mode)); 509 510 /* Similar for FW */ 511 fw = (AGP_MODE_GET_FW(tstatus) 512 & AGP_MODE_GET_FW(mstatus) 513 & AGP_MODE_GET_FW(mode)); 514 515 /* Figure out the max rate */ 516 rate = (AGP_MODE_GET_RATE(tstatus) 517 & AGP_MODE_GET_RATE(mstatus) 518 & AGP_MODE_GET_RATE(mode)); 519 if (rate & AGP_MODE_V2_RATE_4x) 520 rate = AGP_MODE_V2_RATE_4x; 521 else if (rate & AGP_MODE_V2_RATE_2x) 522 rate = AGP_MODE_V2_RATE_2x; 523 else 524 rate = AGP_MODE_V2_RATE_1x; 525 526 /* Construct the new mode word and tell the hardware */ 527 command = AGP_MODE_SET_RQ(0, rq); 528 command = AGP_MODE_SET_SBA(command, sba); 529 command = AGP_MODE_SET_FW(command, fw); 530 command = AGP_MODE_SET_RATE(command, rate); 531 command = AGP_MODE_SET_AGP(command, 1); 532 pci_conf_write(sc->as_pc, sc->as_tag, 533 sc->as_capoff + AGP_COMMAND, command); 534 pci_conf_write(pa->pa_pc, pa->pa_tag, capoff + AGP_COMMAND, command); 535 536 return 0; 537 } 538 539 static int 540 agp_generic_enable_v3(struct agp_softc *sc, struct pci_attach_args *pa, 541 int capoff, u_int32_t mode) 542 { 543 pcireg_t tstatus, mstatus; 544 pcireg_t command; 545 int rq, sba, fw, rate, arqsz, cal; 546 547 tstatus = pci_conf_read(sc->as_pc, sc->as_tag, 548 sc->as_capoff + AGP_STATUS); 549 mstatus = pci_conf_read(pa->pa_pc, pa->pa_tag, 550 capoff + AGP_STATUS); 551 552 /* Set RQ to the min of mode, tstatus and mstatus */ 553 rq = AGP_MODE_GET_RQ(mode); 554 if (AGP_MODE_GET_RQ(tstatus) < rq) 555 rq = AGP_MODE_GET_RQ(tstatus); 556 if (AGP_MODE_GET_RQ(mstatus) < rq) 557 rq = AGP_MODE_GET_RQ(mstatus); 558 559 /* 560 * ARQSZ - Set the value to the maximum one. 561 * Don't allow the mode register to override values. 562 */ 563 arqsz = AGP_MODE_GET_ARQSZ(mode); 564 if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz) 565 arqsz = AGP_MODE_GET_ARQSZ(tstatus); 566 if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz) 567 arqsz = AGP_MODE_GET_ARQSZ(mstatus); 568 569 /* Calibration cycle - don't allow override by mode register */ 570 cal = AGP_MODE_GET_CAL(tstatus); 571 if (AGP_MODE_GET_CAL(mstatus) < cal) 572 cal = AGP_MODE_GET_CAL(mstatus); 573 574 /* SBA must be supported for AGP v3. */ 575 sba = 1; 576 577 /* Set FW if all three support it. */ 578 fw = (AGP_MODE_GET_FW(tstatus) 579 & AGP_MODE_GET_FW(mstatus) 580 & AGP_MODE_GET_FW(mode)); 581 582 /* Figure out the max rate */ 583 rate = (AGP_MODE_GET_RATE(tstatus) 584 & AGP_MODE_GET_RATE(mstatus) 585 & AGP_MODE_GET_RATE(mode)); 586 if (rate & AGP_MODE_V3_RATE_8x) 587 rate = AGP_MODE_V3_RATE_8x; 588 else 589 rate = AGP_MODE_V3_RATE_4x; 590 591 /* Construct the new mode word and tell the hardware */ 592 command = AGP_MODE_SET_RQ(0, rq); 593 command = AGP_MODE_SET_ARQSZ(command, arqsz); 594 command = AGP_MODE_SET_CAL(command, cal); 595 command = AGP_MODE_SET_SBA(command, sba); 596 command = AGP_MODE_SET_FW(command, fw); 597 command = AGP_MODE_SET_RATE(command, rate); 598 command = AGP_MODE_SET_AGP(command, 1); 599 pci_conf_write(sc->as_pc, sc->as_tag, 600 sc->as_capoff + AGP_COMMAND, command); 601 pci_conf_write(pa->pa_pc, pa->pa_tag, capoff + AGP_COMMAND, command); 602 603 return 0; 604 } 605 606 struct agp_memory * 607 agp_generic_alloc_memory(struct agp_softc *sc, int type, vsize_t size) 608 { 609 struct agp_memory *mem; 610 611 if ((size & (AGP_PAGE_SIZE - 1)) != 0) 612 return 0; 613 614 if (sc->as_allocated + size > sc->as_maxmem) 615 return 0; 616 617 if (type != 0) { 618 printf("agp_generic_alloc_memory: unsupported type %d\n", 619 type); 620 return 0; 621 } 622 623 mem = malloc(sizeof *mem, M_AGP, M_WAITOK); 624 if (mem == NULL) 625 return NULL; 626 627 if (bus_dmamap_create(sc->as_dmat, size, size / PAGE_SIZE + 1, 628 size, 0, BUS_DMA_NOWAIT, &mem->am_dmamap) != 0) { 629 free(mem, M_AGP); 630 return NULL; 631 } 632 633 mem->am_id = sc->as_nextid++; 634 mem->am_size = size; 635 mem->am_type = 0; 636 mem->am_physical = 0; 637 mem->am_offset = 0; 638 mem->am_is_bound = 0; 639 TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link); 640 sc->as_allocated += size; 641 642 return mem; 643 } 644 645 int 646 agp_generic_free_memory(struct agp_softc *sc, struct agp_memory *mem) 647 { 648 if (mem->am_is_bound) 649 return EBUSY; 650 651 sc->as_allocated -= mem->am_size; 652 TAILQ_REMOVE(&sc->as_memory, mem, am_link); 653 bus_dmamap_destroy(sc->as_dmat, mem->am_dmamap); 654 free(mem, M_AGP); 655 return 0; 656 } 657 658 int 659 agp_generic_bind_memory(struct agp_softc *sc, struct agp_memory *mem, 660 off_t offset) 661 { 662 off_t i, k; 663 bus_size_t done, j; 664 int error; 665 bus_dma_segment_t *segs, *seg; 666 bus_addr_t pa; 667 int contigpages, nseg; 668 669 mutex_enter(&sc->as_mtx); 670 671 if (mem->am_is_bound) { 672 aprint_error_dev(sc->as_dev, "memory already bound\n"); 673 mutex_exit(&sc->as_mtx); 674 return EINVAL; 675 } 676 677 if (offset < 0 678 || (offset & (AGP_PAGE_SIZE - 1)) != 0 679 || offset + mem->am_size > AGP_GET_APERTURE(sc)) { 680 aprint_error_dev(sc->as_dev, 681 "binding memory at bad offset %#lx\n", 682 (unsigned long) offset); 683 mutex_exit(&sc->as_mtx); 684 return EINVAL; 685 } 686 687 /* 688 * XXXfvdl 689 * The memory here needs to be directly accessable from the 690 * AGP video card, so it should be allocated using bus_dma. 691 * However, it need not be contiguous, since individual pages 692 * are translated using the GATT. 693 * 694 * Using a large chunk of contiguous memory may get in the way 695 * of other subsystems that may need one, so we try to be friendly 696 * and ask for allocation in chunks of a minimum of 8 pages 697 * of contiguous memory on average, falling back to 4, 2 and 1 698 * if really needed. Larger chunks are preferred, since allocating 699 * a bus_dma_segment per page would be overkill. 700 */ 701 702 for (contigpages = 8; contigpages > 0; contigpages >>= 1) { 703 nseg = (mem->am_size / (contigpages * PAGE_SIZE)) + 1; 704 segs = malloc(nseg * sizeof *segs, M_AGP, M_WAITOK); 705 if (segs == NULL) { 706 mutex_exit(&sc->as_mtx); 707 return ENOMEM; 708 } 709 if (bus_dmamem_alloc(sc->as_dmat, mem->am_size, PAGE_SIZE, 0, 710 segs, nseg, &mem->am_nseg, 711 contigpages > 1 ? 712 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) != 0) { 713 free(segs, M_AGP); 714 continue; 715 } 716 if (bus_dmamem_map(sc->as_dmat, segs, mem->am_nseg, 717 mem->am_size, &mem->am_virtual, BUS_DMA_WAITOK) != 0) { 718 bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg); 719 free(segs, M_AGP); 720 continue; 721 } 722 if (bus_dmamap_load(sc->as_dmat, mem->am_dmamap, 723 mem->am_virtual, mem->am_size, NULL, BUS_DMA_WAITOK) != 0) { 724 bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, 725 mem->am_size); 726 bus_dmamem_free(sc->as_dmat, segs, mem->am_nseg); 727 free(segs, M_AGP); 728 continue; 729 } 730 mem->am_dmaseg = segs; 731 break; 732 } 733 734 if (contigpages == 0) { 735 mutex_exit(&sc->as_mtx); 736 return ENOMEM; 737 } 738 739 740 /* 741 * Bind the individual pages and flush the chipset's 742 * TLB. 743 */ 744 done = 0; 745 for (i = 0; i < mem->am_dmamap->dm_nsegs; i++) { 746 seg = &mem->am_dmamap->dm_segs[i]; 747 /* 748 * Install entries in the GATT, making sure that if 749 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not 750 * aligned to PAGE_SIZE, we don't modify too many GATT 751 * entries. 752 */ 753 for (j = 0; j < seg->ds_len && (done + j) < mem->am_size; 754 j += AGP_PAGE_SIZE) { 755 pa = seg->ds_addr + j; 756 AGP_DPF(("binding offset %#lx to pa %#lx\n", 757 (unsigned long)(offset + done + j), 758 (unsigned long)pa)); 759 error = AGP_BIND_PAGE(sc, offset + done + j, pa); 760 if (error) { 761 /* 762 * Bail out. Reverse all the mappings 763 * and unwire the pages. 764 */ 765 for (k = 0; k < done + j; k += AGP_PAGE_SIZE) 766 AGP_UNBIND_PAGE(sc, offset + k); 767 768 bus_dmamap_unload(sc->as_dmat, mem->am_dmamap); 769 bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, 770 mem->am_size); 771 bus_dmamem_free(sc->as_dmat, mem->am_dmaseg, 772 mem->am_nseg); 773 free(mem->am_dmaseg, M_AGP); 774 mutex_exit(&sc->as_mtx); 775 return error; 776 } 777 } 778 done += seg->ds_len; 779 } 780 781 /* 782 * Flush the CPU cache since we are providing a new mapping 783 * for these pages. 784 */ 785 agp_flush_cache(); 786 787 /* 788 * Make sure the chipset gets the new mappings. 789 */ 790 AGP_FLUSH_TLB(sc); 791 792 mem->am_offset = offset; 793 mem->am_is_bound = 1; 794 795 mutex_exit(&sc->as_mtx); 796 797 return 0; 798 } 799 800 int 801 agp_generic_unbind_memory(struct agp_softc *sc, struct agp_memory *mem) 802 { 803 int i; 804 805 mutex_enter(&sc->as_mtx); 806 807 if (!mem->am_is_bound) { 808 aprint_error_dev(sc->as_dev, "memory is not bound\n"); 809 mutex_exit(&sc->as_mtx); 810 return EINVAL; 811 } 812 813 814 /* 815 * Unbind the individual pages and flush the chipset's 816 * TLB. Unwire the pages so they can be swapped. 817 */ 818 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE) 819 AGP_UNBIND_PAGE(sc, mem->am_offset + i); 820 821 agp_flush_cache(); 822 AGP_FLUSH_TLB(sc); 823 824 bus_dmamap_unload(sc->as_dmat, mem->am_dmamap); 825 bus_dmamem_unmap(sc->as_dmat, mem->am_virtual, mem->am_size); 826 bus_dmamem_free(sc->as_dmat, mem->am_dmaseg, mem->am_nseg); 827 828 free(mem->am_dmaseg, M_AGP); 829 830 mem->am_offset = 0; 831 mem->am_is_bound = 0; 832 833 mutex_exit(&sc->as_mtx); 834 835 return 0; 836 } 837 838 /* Helper functions for implementing user/kernel api */ 839 840 static int 841 agp_acquire_helper(struct agp_softc *sc, enum agp_acquire_state state) 842 { 843 if (sc->as_state != AGP_ACQUIRE_FREE) 844 return EBUSY; 845 sc->as_state = state; 846 847 return 0; 848 } 849 850 static int 851 agp_release_helper(struct agp_softc *sc, enum agp_acquire_state state) 852 { 853 854 if (sc->as_state == AGP_ACQUIRE_FREE) 855 return 0; 856 857 if (sc->as_state != state) 858 return EBUSY; 859 860 sc->as_state = AGP_ACQUIRE_FREE; 861 return 0; 862 } 863 864 static struct agp_memory * 865 agp_find_memory(struct agp_softc *sc, int id) 866 { 867 struct agp_memory *mem; 868 869 AGP_DPF(("searching for memory block %d\n", id)); 870 TAILQ_FOREACH(mem, &sc->as_memory, am_link) { 871 AGP_DPF(("considering memory block %d\n", mem->am_id)); 872 if (mem->am_id == id) 873 return mem; 874 } 875 return 0; 876 } 877 878 /* Implementation of the userland ioctl api */ 879 880 static int 881 agp_info_user(struct agp_softc *sc, agp_info *info) 882 { 883 memset(info, 0, sizeof *info); 884 info->bridge_id = sc->as_id; 885 if (sc->as_capoff != 0) 886 info->agp_mode = pci_conf_read(sc->as_pc, sc->as_tag, 887 sc->as_capoff + AGP_STATUS); 888 else 889 info->agp_mode = 0; /* i810 doesn't have real AGP */ 890 info->aper_base = sc->as_apaddr; 891 info->aper_size = AGP_GET_APERTURE(sc) >> 20; 892 info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT; 893 info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT; 894 895 return 0; 896 } 897 898 static int 899 agp_setup_user(struct agp_softc *sc, agp_setup *setup) 900 { 901 return AGP_ENABLE(sc, setup->agp_mode); 902 } 903 904 static int 905 agp_allocate_user(struct agp_softc *sc, agp_allocate *alloc) 906 { 907 struct agp_memory *mem; 908 909 mem = AGP_ALLOC_MEMORY(sc, 910 alloc->type, 911 alloc->pg_count << AGP_PAGE_SHIFT); 912 if (mem) { 913 alloc->key = mem->am_id; 914 alloc->physical = mem->am_physical; 915 return 0; 916 } else { 917 return ENOMEM; 918 } 919 } 920 921 static int 922 agp_deallocate_user(struct agp_softc *sc, int id) 923 { 924 struct agp_memory *mem = agp_find_memory(sc, id); 925 926 if (mem) { 927 AGP_FREE_MEMORY(sc, mem); 928 return 0; 929 } else { 930 return ENOENT; 931 } 932 } 933 934 static int 935 agp_bind_user(struct agp_softc *sc, agp_bind *bind) 936 { 937 struct agp_memory *mem = agp_find_memory(sc, bind->key); 938 939 if (!mem) 940 return ENOENT; 941 942 return AGP_BIND_MEMORY(sc, mem, bind->pg_start << AGP_PAGE_SHIFT); 943 } 944 945 static int 946 agp_unbind_user(struct agp_softc *sc, agp_unbind *unbind) 947 { 948 struct agp_memory *mem = agp_find_memory(sc, unbind->key); 949 950 if (!mem) 951 return ENOENT; 952 953 return AGP_UNBIND_MEMORY(sc, mem); 954 } 955 956 static int 957 agpopen(dev_t dev, int oflags, int devtype, struct lwp *l) 958 { 959 struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev)); 960 961 if (sc == NULL) 962 return ENXIO; 963 964 if (sc->as_chipc == NULL) 965 return ENXIO; 966 967 if (!sc->as_isopen) 968 sc->as_isopen = 1; 969 else 970 return EBUSY; 971 972 return 0; 973 } 974 975 static int 976 agpclose(dev_t dev, int fflag, int devtype, struct lwp *l) 977 { 978 struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev)); 979 struct agp_memory *mem; 980 981 if (sc == NULL) 982 return ENODEV; 983 984 /* 985 * Clear the GATT and force release on last close 986 */ 987 if (sc->as_state == AGP_ACQUIRE_USER) { 988 while ((mem = TAILQ_FIRST(&sc->as_memory))) { 989 if (mem->am_is_bound) { 990 printf("agpclose: mem %d is bound\n", 991 mem->am_id); 992 AGP_UNBIND_MEMORY(sc, mem); 993 } 994 /* 995 * XXX it is not documented, but if the protocol allows 996 * allocate->acquire->bind, it would be possible that 997 * memory ranges are allocated by the kernel here, 998 * which we shouldn't free. We'd have to keep track of 999 * the memory range's owner. 1000 * The kernel API is unsed yet, so we get away with 1001 * freeing all. 1002 */ 1003 AGP_FREE_MEMORY(sc, mem); 1004 } 1005 agp_release_helper(sc, AGP_ACQUIRE_USER); 1006 } 1007 sc->as_isopen = 0; 1008 1009 return 0; 1010 } 1011 1012 static int 1013 agpioctl(dev_t dev, u_long cmd, void *data, int fflag, struct lwp *l) 1014 { 1015 struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev)); 1016 1017 if (sc == NULL) 1018 return ENODEV; 1019 1020 if ((fflag & FWRITE) == 0 && cmd != AGPIOC_INFO) 1021 return EPERM; 1022 1023 switch (cmd) { 1024 case AGPIOC_INFO: 1025 return agp_info_user(sc, (agp_info *) data); 1026 1027 case AGPIOC_ACQUIRE: 1028 return agp_acquire_helper(sc, AGP_ACQUIRE_USER); 1029 1030 case AGPIOC_RELEASE: 1031 return agp_release_helper(sc, AGP_ACQUIRE_USER); 1032 1033 case AGPIOC_SETUP: 1034 return agp_setup_user(sc, (agp_setup *)data); 1035 1036 #ifdef __x86_64__ 1037 { 1038 /* 1039 * Handle paddr_t change from 32 bit for non PAE kernels 1040 * to 64 bit. 1041 */ 1042 #define AGPIOC_OALLOCATE _IOWR(AGPIOC_BASE, 6, agp_oallocate) 1043 1044 typedef struct _agp_oallocate { 1045 int key; /* tag of allocation */ 1046 size_t pg_count; /* number of pages */ 1047 uint32_t type; /* 0 == normal, other devspec */ 1048 u_long physical; /* device specific (some devices 1049 * need a phys address of the 1050 * actual page behind the gatt 1051 * table) */ 1052 } agp_oallocate; 1053 1054 case AGPIOC_OALLOCATE: { 1055 int ret; 1056 agp_allocate aga; 1057 agp_oallocate *oaga = data; 1058 1059 aga.type = oaga->type; 1060 aga.pg_count = oaga->pg_count; 1061 1062 if ((ret = agp_allocate_user(sc, &aga)) == 0) { 1063 oaga->key = aga.key; 1064 oaga->physical = (u_long)aga.physical; 1065 } 1066 1067 return ret; 1068 } 1069 } 1070 #endif 1071 case AGPIOC_ALLOCATE: 1072 return agp_allocate_user(sc, (agp_allocate *)data); 1073 1074 case AGPIOC_DEALLOCATE: 1075 return agp_deallocate_user(sc, *(int *) data); 1076 1077 case AGPIOC_BIND: 1078 return agp_bind_user(sc, (agp_bind *)data); 1079 1080 case AGPIOC_UNBIND: 1081 return agp_unbind_user(sc, (agp_unbind *)data); 1082 1083 } 1084 1085 return EINVAL; 1086 } 1087 1088 static paddr_t 1089 agpmmap(dev_t dev, off_t offset, int prot) 1090 { 1091 struct agp_softc *sc = device_lookup_private(&agp_cd, AGPUNIT(dev)); 1092 1093 if (sc == NULL) 1094 return ENODEV; 1095 1096 if (offset > AGP_GET_APERTURE(sc)) 1097 return -1; 1098 1099 return (bus_space_mmap(sc->as_apt, sc->as_apaddr, offset, prot, 1100 BUS_SPACE_MAP_LINEAR)); 1101 } 1102 1103 const struct cdevsw agp_cdevsw = { 1104 agpopen, agpclose, noread, nowrite, agpioctl, 1105 nostop, notty, nopoll, agpmmap, nokqfilter, D_OTHER 1106 }; 1107 1108 /* Implementation of the kernel api */ 1109 1110 void * 1111 agp_find_device(int unit) 1112 { 1113 return device_lookup_private(&agp_cd, unit); 1114 } 1115 1116 enum agp_acquire_state 1117 agp_state(void *devcookie) 1118 { 1119 struct agp_softc *sc = devcookie; 1120 1121 return sc->as_state; 1122 } 1123 1124 void 1125 agp_get_info(void *devcookie, struct agp_info *info) 1126 { 1127 struct agp_softc *sc = devcookie; 1128 1129 info->ai_mode = pci_conf_read(sc->as_pc, sc->as_tag, 1130 sc->as_capoff + AGP_STATUS); 1131 info->ai_aperture_base = sc->as_apaddr; 1132 info->ai_aperture_size = sc->as_apsize; /* XXXfvdl inconsistent */ 1133 info->ai_memory_allowed = sc->as_maxmem; 1134 info->ai_memory_used = sc->as_allocated; 1135 } 1136 1137 int 1138 agp_acquire(void *dev) 1139 { 1140 return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL); 1141 } 1142 1143 int 1144 agp_release(void *dev) 1145 { 1146 return agp_release_helper(dev, AGP_ACQUIRE_KERNEL); 1147 } 1148 1149 int 1150 agp_enable(void *dev, u_int32_t mode) 1151 { 1152 struct agp_softc *sc = dev; 1153 1154 return AGP_ENABLE(sc, mode); 1155 } 1156 1157 void * 1158 agp_alloc_memory(void *dev, int type, vsize_t bytes) 1159 { 1160 struct agp_softc *sc = dev; 1161 1162 return (void *)AGP_ALLOC_MEMORY(sc, type, bytes); 1163 } 1164 1165 void 1166 agp_free_memory(void *dev, void *handle) 1167 { 1168 struct agp_softc *sc = dev; 1169 struct agp_memory *mem = handle; 1170 1171 AGP_FREE_MEMORY(sc, mem); 1172 } 1173 1174 int 1175 agp_bind_memory(void *dev, void *handle, off_t offset) 1176 { 1177 struct agp_softc *sc = dev; 1178 struct agp_memory *mem = handle; 1179 1180 return AGP_BIND_MEMORY(sc, mem, offset); 1181 } 1182 1183 int 1184 agp_unbind_memory(void *dev, void *handle) 1185 { 1186 struct agp_softc *sc = dev; 1187 struct agp_memory *mem = handle; 1188 1189 return AGP_UNBIND_MEMORY(sc, mem); 1190 } 1191 1192 void 1193 agp_memory_info(void *dev, void *handle, struct agp_memory_info *mi) 1194 { 1195 struct agp_memory *mem = handle; 1196 1197 mi->ami_size = mem->am_size; 1198 mi->ami_physical = mem->am_physical; 1199 mi->ami_offset = mem->am_offset; 1200 mi->ami_is_bound = mem->am_is_bound; 1201 } 1202 1203 int 1204 agp_alloc_dmamem(bus_dma_tag_t tag, size_t size, int flags, 1205 bus_dmamap_t *mapp, void **vaddr, bus_addr_t *baddr, 1206 bus_dma_segment_t *seg, int nseg, int *rseg) 1207 1208 { 1209 int error, level = 0; 1210 1211 if ((error = bus_dmamem_alloc(tag, size, PAGE_SIZE, 0, 1212 seg, nseg, rseg, BUS_DMA_NOWAIT)) != 0) 1213 goto out; 1214 level++; 1215 1216 if ((error = bus_dmamem_map(tag, seg, *rseg, size, vaddr, 1217 BUS_DMA_NOWAIT | flags)) != 0) 1218 goto out; 1219 level++; 1220 1221 if ((error = bus_dmamap_create(tag, size, *rseg, size, 0, 1222 BUS_DMA_NOWAIT, mapp)) != 0) 1223 goto out; 1224 level++; 1225 1226 if ((error = bus_dmamap_load(tag, *mapp, *vaddr, size, NULL, 1227 BUS_DMA_NOWAIT)) != 0) 1228 goto out; 1229 1230 *baddr = (*mapp)->dm_segs[0].ds_addr; 1231 1232 return 0; 1233 out: 1234 switch (level) { 1235 case 3: 1236 bus_dmamap_destroy(tag, *mapp); 1237 /* FALLTHROUGH */ 1238 case 2: 1239 bus_dmamem_unmap(tag, *vaddr, size); 1240 /* FALLTHROUGH */ 1241 case 1: 1242 bus_dmamem_free(tag, seg, *rseg); 1243 break; 1244 default: 1245 break; 1246 } 1247 1248 return error; 1249 } 1250 1251 void 1252 agp_free_dmamem(bus_dma_tag_t tag, size_t size, bus_dmamap_t map, 1253 void *vaddr, bus_dma_segment_t *seg, int nseg) 1254 { 1255 bus_dmamap_unload(tag, map); 1256 bus_dmamap_destroy(tag, map); 1257 bus_dmamem_unmap(tag, vaddr, size); 1258 bus_dmamem_free(tag, seg, nseg); 1259 } 1260 1261 static bool 1262 agp_resume(device_t dv, const pmf_qual_t *qual) 1263 { 1264 agp_flush_cache(); 1265 1266 return true; 1267 } 1268