/*	$NetBSD: twe.c,v 1.27 2002/08/06 20:47:44 kim Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.27 2002/08/06 20:47:44 kim Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>

#define	PCI_CBIO	0x10

static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static int	twe_param_get(struct twe_softc *, int, int, size_t,
			      void (*)(struct twe_ccb *, int), void **);
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);

static inline u_int32_t twe_inl(struct twe_softc *, int);
static inline void twe_outl(struct twe_softc *, int, u_int32_t);

struct cfattach twe_ca = {
	sizeof(struct twe_softc), twe_match, twe_attach
};

struct {
	const u_int	aen;		/* High byte indicates type of message */
	const char	*desc;
} static const twe_aen_names[] = {
	{ 0x0000, "queue empty" },
	{ 0x0001, "soft reset" },
	{ 0x0102, "degraded mirror" },
	{ 0x0003, "controller error" },
	{ 0x0104, "rebuild fail" },
	{ 0x0105, "rebuild done" },
	{ 0x0106, "incompatible unit" },
	{ 0x0107, "initialisation done" },
	{ 0x0108, "unclean shutdown detected" },
	{ 0x0109, "drive timeout" },
	{ 0x010a, "drive error" },
	{ 0x010b, "rebuild started" },
	{ 0x010c, "init started" },
	{ 0x010d, "logical unit deleted" },
	{ 0x020f, "SMART threshold exceeded" },
	{ 0x0015, "table undefined" },	/* XXX: Not in FreeBSD's table */
	{ 0x0221, "ATA UDMA downgrade" },
	{ 0x0222, "ATA UDMA upgrade" },
	{ 0x0223, "Sector repair occurred" },
	{ 0x0024, "SBUF integrity check failure" },
	{ 0x0225, "lost cached write" },
	{ 0x0226, "drive ECC error detected" },
	{ 0x0227, "DCB checksum error" },
	{ 0x0228, "DCB unsupported version" },
	{ 0x0129, "verify started" },
	{ 0x012a, "verify failed" },
	{ 0x012b, "verify complete" },
	{ 0x022c, "overwrote bad sector during rebuild" },
	{ 0x022d, "encountered bad sector during rebuild" },
	{ 0x00ff, "aen queue full" },
};

/*
 * The high byte of the message code above determines its format;
 * currently we know about format 0 (no unit/port specific information),
 * format 1 (unit specific message), and format 2 (port specific message).
 */
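/*
 * Worked example (assuming the TWE_AEN_CODE()/TWE_AEN_UNIT() macros in
 * twereg.h extract the low and high bytes respectively, as their use in
 * twe_aen_handler() below suggests): table entry 0x0102 ("degraded
 * mirror") is a format 1 message, so an AEN of 0x0302 read from the
 * controller would be reported as
 *
 *	twe0: unit 3: AEN 0x0002 (degraded mirror) received
 */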
static const char *aenfmt[] = {
	"",		/* No message */
	"unit %d: ",	/* Unit message */
	"port %d: "	/* Port message */
};


static inline u_int32_t
twe_inl(struct twe_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
twe_outl(struct twe_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int size, i, rv, rseg;
	size_t max_segs, max_xfer;
	struct twe_param *dtp, *ctp;
	bus_dma_segment_t seg;
	struct twe_cmd *tc;
	struct twe_attach_args twea;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	printf(": 3ware Escalade\n");

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
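	/*
	 * Layout note (descriptive only): a single physically contiguous,
	 * page-aligned region holds TWE_MAX_QUEUECNT command blocks.  Each
	 * CCB records its slot via ccb_cmd/ccb_cmdid, and twe_ccb_submit()
	 * later posts a command by writing
	 *
	 *	sc->sc_cmds_paddr + ccb->ccb_cmdid * sizeof(struct twe_cmd)
	 *
	 * to TWE_REG_CMD_QUEUE.
	 */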
	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;
	max_segs = twe_get_maxsegs();
	max_xfer = twe_get_maxxfer(max_segs);

	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
		    max_segs, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0) {
			printf("%s: can't create dmamap, rv = %d\n",
			    sc->sc_dv.dv_xname, rv);
			return;
		}
		/* Save one CCB for parameter retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
		return;
	}

	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	if (twe_reset(sc)) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Find attached units. */
	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, (void **)&dtp);
	if (rv != 0) {
		printf("%s: can't detect attached units (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	/* For each detected unit, collect size and store in an array. */
	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
		/* Unit present? */
		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
			sc->sc_dsize[i] = 0;
			continue;
		}

		rv = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
		    TWE_PARAM_UNITINFO_Capacity, 4, NULL, (void **)&ctp);
		if (rv != 0) {
			printf("%s: error %d fetching capacity for unit %d\n",
			    sc->sc_dv.dv_xname, rv, i);
			continue;
		}

		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
		free(ctp, M_DEVBUF);
		sc->sc_nunits++;
	}
	free(dtp, M_DEVBUF);

	/* Initialise connection with controller and enable interrupts. */
	twe_init_connection(sc);
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* Attach sub-devices. */
	for (i = 0; i < TWE_MAX_UNITS; i++) {
		if (sc->sc_dsize[i] == 0)
			continue;
		twea.twea_unit = i;
		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
	}
}

/*
 * Reset the controller.  Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
	struct twe_param *tp;
	u_int aen, status;
	volatile u_int32_t junk;
	int got, rv;

	/* Issue a soft reset. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
		printf("%s: no attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Pull AENs out of the controller; look for a soft reset AEN. */
	for (got = 0;;) {
		rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
		    2, NULL, (void **)&tp);
		if (rv != 0)
			printf("%s: error %d while draining response queue\n",
			    sc->sc_dv.dv_xname, rv);
		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
		free(tp, M_DEVBUF);
		if (aen == TWE_AEN_QUEUE_EMPTY)
			break;
		if (aen == TWE_AEN_SOFT_RESET)
			got = 1;
	}
	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = twe_inl(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = twe_inl(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		printf("block device at %s", pnp);
	printf(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
	    cf->tweacf_unit != twea->twea_unit)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught, rv;

	sc = arg;
	caught = 0;
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
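	/*
	 * Only one AEN fetch is kept outstanding at a time: TWEF_AEN is
	 * set while a retrieval is in flight, and twe_aen_handler() keeps
	 * chaining further fetches until the controller reports
	 * TWE_AEN_QUEUE_EMPTY, at which point the attention interrupt is
	 * cleared and the flag dropped.
	 */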
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		if ((sc->sc_flags & TWEF_AEN) == 0) {
			rv = twe_param_get(sc, TWE_PARAM_AEN,
			    TWE_PARAM_AEN_UnitCode, 2, twe_aen_handler,
			    NULL);
			if (rv != 0) {
				printf("%s: unable to retrieve AEN (%d)\n",
				    sc->sc_dv.dv_xname, rv);
				twe_outl(sc, TWE_REG_CTL,
				    TWE_CTL_CLEAR_ATTN_INTR);
			} else
				sc->sc_flags |= TWEF_AEN;
		}
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Handle an AEN returned by the controller.
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	const char *str;
	u_int aen;
	int i, hu, rv;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		sc->sc_flags &= ~TWEF_AEN;
		return;
	}

	str = "<unknown>";
	i = 0;
	hu = 0;

	while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
		if (TWE_AEN_CODE(twe_aen_names[i].aen) == TWE_AEN_CODE(aen)) {
			str = twe_aen_names[i].desc;
			hu = TWE_AEN_UNIT(twe_aen_names[i].aen);
			break;
		}
		i++;
	}
	printf("%s: ", sc->sc_dv.dv_xname);
	printf(aenfmt[hu], TWE_AEN_UNIT(aen));
	printf("AEN 0x%04x (%s) received\n", TWE_AEN_CODE(aen), str);

	/*
	 * Chain another retrieval in case interrupts have been
	 * coalesced.
	 */
	rv = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
	    twe_aen_handler, NULL);
	if (rv != 0)
		printf("%s: unable to retrieve AEN (%d)\n",
		    sc->sc_dv.dv_xname, rv);
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with the generated context when the command has
 * completed.  If no callback is provided, the command will be executed
 * synchronously and a pointer to a buffer containing the returned data
 * will be handed back via `pbuf'.
 *
 * The caller or callback is responsible for freeing the buffer.
 */
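/*
 * Synchronous usage sketch (mirrors what twe_attach() does to read a
 * unit's capacity; error handling trimmed, `unit' is illustrative):
 *
 *	struct twe_param *tp;
 *
 *	if (twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
 *	    TWE_PARAM_UNITINFO_Capacity, 4, NULL, (void **)&tp) == 0) {
 *		size = le32toh(*(u_int32_t *)tp->tp_data);
 *		free(tp, M_DEVBUF);
 *	}
 */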
static int
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
	      void (*func)(struct twe_ccb *, int), void **pbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	rv = twe_ccb_alloc(sc, &ccb,
	    TWE_CCB_PARAM | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	if (rv != 0)
		return (rv);

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (pbuf != NULL)
		*pbuf = tp;

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		free(tp, M_DEVBUF);
		return (rv);
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
		if (rv != 0)
			free(tp, M_DEVBUF);
	} else {
		twe_ccb_enqueue(sc, ccb);
		rv = 0;
	}

	return (rv);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
		return (rv);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_MAX_QUEUECNT) {
			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: bad completion (not active)\n",
			    sc->sc_dv.dv_xname);
			continue;
		}
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}
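/*
 * Note (descriptive): twe_poll() is the single completion path.  It runs
 * from twe_intr() when the controller raises a response interrupt, and it
 * is also spun directly by twe_ccb_poll() for synchronous commands, which
 * is why it must only be called with interrupts blocked (splbio).
 */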
/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

	for (timo *= 10; timo != 0; timo--) {
		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    status & ~TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
{
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	int s;

	s = splbio();
	if ((flags & TWE_CCB_PARAM) != 0)
		ccb = sc->sc_ccbs;
	else {
		/* Allocate a CCB and command block. */
		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
			splx(s);
			return (EAGAIN);
		}
		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB already allocated");
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	/* Initialise some fields and return. */
	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
	*ccbp = ccb;

	return (0);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s, rv;
	void *data;

	/*
	 * The data as a whole must be 512-byte aligned.
	 */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splvm();
		/* XXX */
		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, NULL,
		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/*
	 * Map the data buffer into bus space and build the S/G list.
	 */
	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0) {
		if (ccb->ccb_abuf != (vaddr_t)0) {
			s = splvm();
			/* XXX */
			uvm_km_free(kmem_map, ccb->ccb_abuf,
			    ccb->ccb_datasize);
			splx(s);
		}
		return (rv);
	}

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
#ifdef DEBUG
	default:
		panic("twe_ccb_map: oops");
#endif
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout (but don't check status, as some command types don't
 * return status).  Must be called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{
	int rv;

	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
		return (rv);

	for (timo *= 1000; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
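/*
 * Ordering note (descriptive): submission stops at the first CCB the
 * controller refuses (twe_ccb_submit() returning EBUSY), so commands are
 * always issued in FIFO order; the queue is retried from twe_poll() once
 * completions free up controller slots.
 */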
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}
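/*
 * Usage sketch (illustrative only): a consumer such as the ld attachment
 * would normally drive the exported CCB interface in this order,
 * mirroring what twe_param_get() and twe_aen_handler() do above:
 *
 *	twe_ccb_alloc(sc, &ccb, TWE_CCB_DATA_IN);	// or TWE_CCB_DATA_OUT
 *	// fill in ccb->ccb_data, ccb->ccb_datasize, ccb->ccb_tx and the
 *	// command block ccb->ccb_cmd
 *	twe_ccb_map(sc, ccb);				// build the S/G list
 *	twe_ccb_enqueue(sc, ccb);			// submit when possible
 *	// on completion, twe_poll() calls ccb->ccb_tx.tx_handler, which
 *	// should twe_ccb_unmap() and twe_ccb_free() the CCB
 */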