/*	$NetBSD: twe.c,v 1.5 2000/12/28 22:59:15 sommerfeld Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include "opt_twe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>

#define	TWE_INL(sc, port) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, port)
#define	TWE_OUTL(sc, port, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, port, val)

#if TWE_MAX_QUEUECNT == TWE_MAX_CMDS
#define	TWE_REAL_MAX_QUEUECNT	TWE_MAX_CMDS
#else
#define	TWE_REAL_MAX_QUEUECNT	(TWE_MAX_CMDS + 1)
#endif

#define	PCI_CBIO	0x10

static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static void	*twe_param_get(struct twe_softc *, int, int, size_t,
		    void (*)(struct twe_ccb *, int));
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);

struct cfattach twe_ca = {
	sizeof(struct twe_softc), twe_match, twe_attach
};

struct {
	const u_int	aen;		/* High byte non-zero if w/unit */
	const char	*desc;
} static const twe_aen_names[] = {
	{ 0x0000, "queue empty" },
	{ 0x0001, "soft reset" },
	{ 0x0102, "degraded mirror" },
	{ 0x0003, "controller error" },
	{ 0x0104, "rebuild fail" },
	{ 0x0105, "rebuild done" },
	{ 0x0106, "incompatible unit" },
	{ 0x0107, "init done" },
	{ 0x0108, "unclean shutdown" },
	{ 0x0109, "aport timeout" },
	{ 0x010a, "drive error" },
	{ 0x010b, "rebuild started" },
	{ 0x0015, "table undefined" },
	{ 0x00ff, "aen queue full" },
};

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE);
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
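 *
 * The attach path maps the board's I/O registers, enables bus mastering,
 * establishes the interrupt, allocates the command block/CCB pool, resets
 * the controller, sizes each online unit from the parameter tables and
 * finally attaches a block device child for each unit.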
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int size, i, rv, rseg;
	struct twe_param *dtp, *ctp;
	bus_dma_segment_t seg;
	struct twe_cmd *tc;
	struct twe_attach_args twea;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	printf(": 3ware Escalade\n");

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		printf("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: can't establish interrupt", sc->sc_dv.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
	size = sizeof(struct twe_cmd) * TWE_REAL_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	ccb = malloc(sizeof(*ccb) * TWE_REAL_MAX_QUEUECNT, M_DEVBUF, M_WAITOK);
	if (ccb == NULL) {
		printf("%s: unable to allocate CCBs\n", sc->sc_dv.dv_xname);
		return;
	}

	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;

	for (i = 0; i < TWE_REAL_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, TWE_MAX_XFER,
		    TWE_MAX_SEGS, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0)
			break;
		/* Save one CCB for parameter retrieval.
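		 * CCB 0 is never placed on the free list; twe_ccb_alloc()
		 * hands it out only for TWE_CCB_PARAM requests.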
		 */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}
	if ((sc->sc_nccbs = i) <= TWE_MIN_QUEUECNT) {
		printf("%s: too few CCBs available\n", sc->sc_dv.dv_xname);
		return;
	}
	if (sc->sc_nccbs != TWE_REAL_MAX_QUEUECNT)
		printf("%s: %d/%d CCBs usable\n", sc->sc_dv.dv_xname,
		    sc->sc_nccbs, TWE_REAL_MAX_QUEUECNT);

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		printf("%s: microcontroller not ready\n", sc->sc_dv.dv_xname);
		return;
	}

	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	if (twe_reset(sc)) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Find attached units. */
	dtp = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL);
	if (dtp == NULL) {
		printf("%s: can't detect attached units\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/* For each detected unit, collect size and store in an array. */
	for (i = 0, sc->sc_nunits = 0; i < TWE_MAX_UNITS; i++) {
		/* Unit present? */
		if ((dtp->tp_data[i] & TWE_PARAM_UNITSTATUS_Online) == 0) {
			sc->sc_dsize[i] = 0;
			continue;
		}

		ctp = twe_param_get(sc, TWE_PARAM_UNITINFO + i,
		    TWE_PARAM_UNITINFO_Capacity, 4, NULL);
		if (ctp == NULL) {
			printf("%s: error fetching capacity for unit %d\n",
			    sc->sc_dv.dv_xname, i);
			continue;
		}

		sc->sc_dsize[i] = le32toh(*(u_int32_t *)ctp->tp_data);
		free(ctp, M_DEVBUF);
		sc->sc_nunits++;
	}
	free(dtp, M_DEVBUF);

	/* Initialise connection with controller and enable interrupts. */
	twe_init_connection(sc);
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* Attach sub-devices. */
	for (i = 0; i < TWE_MAX_UNITS; i++) {
		if (sc->sc_dsize[i] == 0)
			continue;
		twea.twea_unit = i;
		config_found_sm(&sc->sc_dv, &twea, twe_print, twe_submatch);
	}
}

/*
 * Reset the controller.  Currently only useful at attach time; must be
 * called with interrupts blocked.
 */
static int
twe_reset(struct twe_softc *sc)
{
	struct twe_param *tp;
	u_int aen, status;
	volatile u_int32_t junk;
	int got;

	/* Issue a soft reset. */
	TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 15)) {
		printf("%s: no attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Pull AENs out of the controller; look for a soft reset AEN. */
	for (got = 0;;) {
		tp = twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode,
		    2, NULL);
		if (tp == NULL)
			return (-1);
		aen = TWE_AEN_CODE(le16toh(*(u_int16_t *)tp->tp_data));
		free(tp, M_DEVBUF);
		if (aen == TWE_AEN_QUEUE_EMPTY)
			break;
		if (aen == TWE_AEN_SOFT_RESET)
			got = 1;
	}
	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = TWE_INL(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue.
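	 * Each read of TWE_REG_RESP_QUEUE discards one stale completion;
	 * stop once the status register reports the queue empty.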
	 */
	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = TWE_INL(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		printf("block device at %s", pnp);
	printf(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (cf->tweacf_unit != TWECF_UNIT_DEFAULT &&
	    cf->tweacf_unit != twea->twea_unit)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught;

	sc = arg;
	caught = 0;
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
		    twe_aen_handler);
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DIAGNOSTIC
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		TWE_OUTL(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Handle an AEN returned by the controller.
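 *
 * Called as the completion handler for the AEN parameter fetch issued from
 * twe_intr() (and re-issued below to catch coalesced AENs).  The CCB is
 * still mapped on entry and is unmapped and freed here.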
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	const char *str;
	u_int aen;
	int i, hu;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) != TWE_AEN_QUEUE_EMPTY) {
		str = "<unknown>";
		i = 0;
		hu = 0;

		while (i < sizeof(twe_aen_names) / sizeof(twe_aen_names[0])) {
			if (TWE_AEN_CODE(twe_aen_names[i].aen) ==
			    TWE_AEN_CODE(aen)) {
				str = twe_aen_names[i].desc;
				hu = (TWE_AEN_UNIT(twe_aen_names[i].aen) != 0);
				break;
			}
			i++;
		}
		printf("%s: AEN 0x%04x (%s) received", sc->sc_dv.dv_xname,
		    TWE_AEN_CODE(aen), str);
		if (hu != 0)
			printf(" for unit %d", TWE_AEN_UNIT(aen));
		printf("\n");

		/*
		 * Chain another retrieval in case interrupts have been
		 * coalesced.
		 */
		twe_param_get(sc, TWE_PARAM_AEN, TWE_PARAM_AEN_UnitCode, 2,
		    twe_aen_handler);
	}
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with generated context when the command has completed.
 * If no callback is provided, the command will be executed synchronously
 * and a pointer to a buffer containing the returned data will be returned.
 *
 * The caller or callback is responsible for freeing the buffer.
 */
static void *
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
	      void (*func)(struct twe_ccb *, int))
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	if (twe_ccb_alloc(sc, &ccb, TWE_CCB_PARAM | TWE_CCB_DATA_IN |
	    TWE_CCB_DATA_OUT) != 0)
		return (NULL);

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL) {
		twe_ccb_free(sc, ccb);
		return (NULL);
	}

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if (twe_ccb_map(sc, ccb) != 0) {
		twe_ccb_free(sc, ccb);
		free(tp, M_DEVBUF);
		return (NULL);
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		if ((rv = twe_ccb_submit(sc, ccb)) == 0)
			rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
		if (rv != 0) {
			free(tp, M_DEVBUF);
			tp = NULL;
		}
	} else {
		twe_ccb_enqueue(sc, ccb);
		tp = NULL;
	}

	return (tp);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((rv = twe_ccb_alloc(sc, &ccb, 0)) != 0)
		return (rv);

	/* Build the command.
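	 * The count field carries TWE_MAX_CMDS, telling the controller how
	 * many commands the host intends to keep in flight.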
	 */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	if ((rv = twe_ccb_submit(sc, ccb)) == 0)
		rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = TWE_INL(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = TWE_INL(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_REAL_MAX_QUEUECNT) {
			printf("%s: bad completion\n", sc->sc_dv.dv_xname);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: bad completion (not active)\n",
			    sc->sc_dv.dv_xname);
			continue;
		}
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

	for (; timo != 0; timo--) {
		if ((TWE_INL(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    status & ~TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
int
twe_ccb_alloc(struct twe_softc *sc, struct twe_ccb **ccbp, int flags)
{
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	int s;

	if ((flags & TWE_CCB_PARAM) != 0)
		ccb = sc->sc_ccbs;
	else {
		s = splbio();
		/* Allocate a CCB and command block. */
		if (SLIST_FIRST(&sc->sc_ccb_freelist) == NULL) {
			splx(s);
			return (EAGAIN);
		}
		ccb = SLIST_FIRST(&sc->sc_ccb_freelist);
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
		splx(s);
	}

#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB already allocated");
	flags |= TWE_CCB_ALLOCED;
#endif

	/* Initialise some fields and return.
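	 * tx_handler is cleared so that a completion arriving without a
	 * caller-supplied handler is simply dropped by twe_poll().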
	 */
	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
	*ccbp = ccb;

	return (0);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_PARAM) == 0)
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s;
	void *data;

	/* The data as a whole must be 512-byte aligned. */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splimp();
		/* XXX */
		ccb->ccb_abuf = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    ccb->ccb_datasize, UVM_KMF_NOWAIT);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/* Map the data buffer into bus space and build the S/G list. */
	bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT);

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
#ifdef DEBUG
	default:
		panic("twe_ccb_map: oops");
#endif
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
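 *
 * If twe_ccb_map() substituted an aligned bounce buffer, the data is copied
 * back for inbound transfers and the buffer is released.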
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splimp();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Wait for the specified CCB to complete.  Return non-zero on timeout (but
 * don't check status, as some command types don't return status).  Must be
 * called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{

	for (; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100000);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = TWE_INL(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds, sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		TWE_OUTL(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}