/*	$NetBSD: twe.c,v 1.68 2005/06/28 00:28:42 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.68 2005/06/28 00:28:42 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>
#include <dev/pci/tweio.h>

#include "locators.h"

#define	PCI_CBIO	0x10

static int	twe_aen_get(struct twe_softc *, uint16_t *);
static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
static uint16_t	twe_aen_dequeue(struct twe_softc *);

static void	twe_attach(struct device *, struct device *, void *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(struct device *, struct cfdata *, void *);
static int	twe_param_set(struct twe_softc *, int, int, size_t, void *);
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_submatch(struct device *, struct cfdata *,
		    const locdesc_t *, void *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);
static void	twe_describe_controller(struct twe_softc *);
static void	twe_clear_pci_abort(struct twe_softc *sc);
static void	twe_clear_pci_parity_error(struct twe_softc *sc);

static int	twe_add_unit(struct twe_softc *, int);
static int	twe_del_unit(struct twe_softc *, int);

static inline u_int32_t twe_inl(struct twe_softc *, int);
static inline void twe_outl(struct twe_softc *, int, u_int32_t);

extern struct cfdriver twe_cd;

CFATTACH_DECL(twe, sizeof(struct twe_softc),
    twe_match, twe_attach, NULL, NULL);

/* FreeBSD driver revision for sysctl expected by the 3ware cli */
const char twever[] = "1.50.01.002";

/*
 * Tables to convert numeric codes to strings.
 */
const struct twe_code_table twe_table_status[] = {
	{ 0x00, "successful completion" },

	/* info */
	{ 0x42, "command in progress" },
	{ 0x6c, "retrying interface CRC error from UDMA command" },

	/* warning */
	{ 0x81, "redundant/inconsequential request ignored" },
	{ 0x8e, "failed to write zeroes to LBA 0" },
	{ 0x8f, "failed to profile TwinStor zones" },

	/* fatal */
	{ 0xc1, "aborted due to system command or reconfiguration" },
	{ 0xc4, "aborted" },
	{ 0xc5, "access error" },
	{ 0xc6, "access violation" },
	{ 0xc7, "device failure" },	/* high byte may be port # */
	{ 0xc8, "controller error" },
	{ 0xc9, "timed out" },
	{ 0xcb, "invalid unit number" },
	{ 0xcf, "unit not available" },
	{ 0xd2, "undefined opcode" },
	{ 0xdb, "request incompatible with unit" },
	{ 0xdc, "invalid request" },
	{ 0xff, "firmware error, reset requested" },

	{ 0,	NULL }
};

const struct twe_code_table twe_table_unitstate[] = {
	{ TWE_PARAM_UNITSTATUS_Normal,		"Normal" },
	{ TWE_PARAM_UNITSTATUS_Initialising,	"Initializing" },
	{ TWE_PARAM_UNITSTATUS_Degraded,	"Degraded" },
	{ TWE_PARAM_UNITSTATUS_Rebuilding,	"Rebuilding" },
	{ TWE_PARAM_UNITSTATUS_Verifying,	"Verifying" },
	{ TWE_PARAM_UNITSTATUS_Corrupt,		"Corrupt" },
	{ TWE_PARAM_UNITSTATUS_Missing,		"Missing" },

	{ 0,					NULL }
};

const struct twe_code_table twe_table_unittype[] = {
	/* array descriptor configuration */
	{ TWE_AD_CONFIG_RAID0,		"RAID0" },
	{ TWE_AD_CONFIG_RAID1,		"RAID1" },
	{ TWE_AD_CONFIG_TwinStor,	"TwinStor" },
	{ TWE_AD_CONFIG_RAID5,		"RAID5" },
	{ TWE_AD_CONFIG_RAID10,		"RAID10" },
	{ TWE_UD_CONFIG_JBOD,		"JBOD" },

	{ 0,				NULL }
};

const struct twe_code_table twe_table_stripedepth[] = {
	{ TWE_AD_STRIPE_4k,	"4K" },
	{ TWE_AD_STRIPE_8k,	"8K" },
	{ TWE_AD_STRIPE_16k,	"16K" },
	{ TWE_AD_STRIPE_32k,	"32K" },
	{ TWE_AD_STRIPE_64k,	"64K" },
	{ TWE_AD_STRIPE_128k,	"128K" },
	{ TWE_AD_STRIPE_256k,	"256K" },
	{ TWE_AD_STRIPE_512k,	"512K" },
	{ TWE_AD_STRIPE_1024k,	"1024K" },

	{ 0,			NULL }
};

/*
 * Asynchronous event notification messages are qualified:
 *	a - not unit/port specific
 *	u - unit specific
 *	p - port specific
 *
 * They are further qualified with a severity:
 *	E - LOG_EMERG
 *	a - LOG_ALERT
 *	c - LOG_CRIT
 *	e - LOG_ERR
 *	w - LOG_WARNING
 *	n - LOG_NOTICE
 *	i - LOG_INFO
 *	d - LOG_DEBUG
 *	blank - just use printf
 */
const struct twe_code_table twe_table_aen[] = {
	{ 0x00, "a  queue empty" },
	{ 0x01, "a  soft reset" },
	{ 0x02, "uc degraded mode" },
	{ 0x03, "aa controller error" },
	{ 0x04, "uE rebuild fail" },
	{ 0x05, "un rebuild done" },
	{ 0x06, "ue incomplete unit" },
	{ 0x07, "un initialization done" },
	{ 0x08, "uw unclean shutdown detected" },
	{ 0x09, "pe drive timeout" },
	{ 0x0a, "pc drive error" },
	{ 0x0b, "un rebuild started" },
	{ 0x0c, "un initialization started" },
	{ 0x0d, "ui logical unit deleted" },
	{ 0x0f, "pc SMART threshold exceeded" },
	{ 0x15, "a  table undefined" },	/* XXX: Not in FreeBSD's table */
	{ 0x21, "pe ATA UDMA downgrade" },
	{ 0x22, "pi ATA UDMA upgrade" },
	{ 0x23, "pw sector repair occurred" },
	{ 0x24, "aa SBUF integrity check failure" },
	{ 0x25, "pa lost cached write" },
	{ 0x26, "pa drive ECC error detected" },
	{ 0x27, "pe DCB checksum error" },
	{ 0x28, "pn DCB unsupported version" },
	{ 0x29, "ui verify started" },
	{ 0x2a, "ua verify failed" },
	{ 0x2b, "ui verify complete" },
	{ 0x2c, "pw overwrote bad sector during rebuild" },
	{ 0x2d, "pa encountered bad sector during rebuild" },
	{ 0x2e, "pe replacement drive too small" },
	{ 0x2f, "ue array not previously initialized" },
	{ 0x30, "p  drive not supported" },
	{ 0xff, "a  aen queue full" },

	{ 0,	NULL },
};

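/*
 * Illustrative note (not from the original source): each string above packs
 * the qualifier in column 0, the severity in column 1 (blank when plain
 * printf is desired) and the message from column 3 onward, which is how
 * twe_aen_enqueue() below decodes it.  For example, a hypothetical AEN 0x04
 * reported for unit 2 maps to "uE rebuild fail" and is handled roughly as:
 *
 *	str = twe_describe_code(twe_table_aen, 0x04);	-> "uE rebuild fail"
 *	str[0] == 'u'	-> unit-specific, so the unit number is printed
 *	str[1] == 'E'	-> LOG_EMERG
 *	msg = str + 3	-> "rebuild fail"
 *	log(LOG_EMERG, ...)	-> "twe0: unit 2: rebuild fail"
 */
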
const char *
twe_describe_code(const struct twe_code_table *table, uint32_t code)
{

	for (; table->string != NULL; table++) {
		if (table->code == code)
			return (table->string);
	}
	return (NULL);
}

static inline u_int32_t
twe_inl(struct twe_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
twe_outl(struct twe_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Match a supported board.
 */
static int
twe_match(struct device *parent, struct cfdata *cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int s, size, i, rv, rseg;
	size_t max_segs, max_xfer;
	bus_dma_segment_t seg;
	struct ctlname ctlnames[] = CTL_NAMES;
	const struct sysctlnode *node;
	struct twe_cmd *tc;
	struct twe_ccb *ccb;

	sc = (struct twe_softc *)self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	aprint_naive(": RAID controller\n");
	aprint_normal(": 3ware Escalade\n");

	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	if (ccb == NULL) {
		aprint_error("%s: unable to allocate memory for ccbs\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		aprint_error("%s: can't map i/o space\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: can't map interrupt\n", sc->sc_dv.dv_xname);
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, twe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: can't establish interrupt%s%s\n",
		    sc->sc_dv.dv_xname,
		    (intrstr) ? " at " : "",
		    (intrstr) ? intrstr : "");
		return;
	}

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n",
		    sc->sc_dv.dv_xname, intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
	 */
	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (caddr_t *)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map commands, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error("%s: unable to create command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load command DMA map, rv = %d\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;
	max_segs = twe_get_maxsegs();
	max_xfer = twe_get_maxxfer(max_segs);

	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
		    max_segs, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0) {
			aprint_error("%s: can't create dmamap, rv = %d\n",
			    sc->sc_dv.dv_xname, rv);
			return;
		}

		/* Save the first CCB for AEN retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		aprint_error("%s: microcontroller not ready\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	s = splbio();
	rv = twe_reset(sc);
	splx(s);
	if (rv) {
		aprint_error("%s: reset failed\n", sc->sc_dv.dv_xname);
		return;
	}

	/* Initialise connection with controller. */
	twe_init_connection(sc);

	twe_describe_controller(sc);

	/* Find and attach RAID array units. */
	sc->sc_nunits = 0;
	for (i = 0; i < TWE_MAX_UNITS; i++)
		(void) twe_add_unit(sc, i);

	/* ...and finally, enable interrupts. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* sysctl set-up for 3ware cli */
	if (sysctl_createv(NULL, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw",
	    NULL, NULL, 0, NULL, 0,
	    CTL_HW, CTL_EOL) != 0) {
		printf("%s: could not create %s sysctl node\n",
		    sc->sc_dv.dv_xname, ctlnames[CTL_HW].ctl_name);
		return;
	}
	if (sysctl_createv(NULL, 0, NULL, &node,
	    0, CTLTYPE_NODE, sc->sc_dv.dv_xname,
	    SYSCTL_DESCR("twe driver information"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
		printf("%s: could not create %s.%s sysctl node\n",
		    sc->sc_dv.dv_xname, ctlnames[CTL_HW].ctl_name,
		    sc->sc_dv.dv_xname);
		return;
	}
	if ((i = sysctl_createv(NULL, 0, NULL, NULL,
	    0, CTLTYPE_STRING, "driver_version",
	    SYSCTL_DESCR("twe0 driver version"),
	    NULL, 0, &twever, 0,
	    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL)) != 0) {
		printf("%s: could not create %s.%s.driver_version sysctl\n",
		    sc->sc_dv.dv_xname, ctlnames[CTL_HW].ctl_name,
		    sc->sc_dv.dv_xname);
		return;
	}
}

void
twe_register_callbacks(struct twe_softc *sc, int unit,
    const struct twe_callbacks *tcb)
{

	sc->sc_units[unit].td_callbacks = tcb;
}

static void
twe_recompute_openings(struct twe_softc *sc)
{
	struct twe_drive *td;
	int unit, openings;

	if (sc->sc_nunits != 0)
		openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
	else
		openings = 0;
	if (openings == sc->sc_openings)
		return;
	sc->sc_openings = openings;

#ifdef TWE_DEBUG
	printf("%s: %d array%s, %d openings per array\n",
	    sc->sc_dv.dv_xname, sc->sc_nunits,
	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
#endif

	for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
		td = &sc->sc_units[unit];
		if (td->td_dev != NULL)
			(*td->td_callbacks->tcb_openings)(td->td_dev,
			    sc->sc_openings);
	}
}

static int
twe_add_unit(struct twe_softc *sc, int unit)
{
	struct twe_param *dtp, *atp;
	struct twe_array_descriptor *ad;
	struct twe_drive *td;
	struct twe_attach_args twea;
	uint32_t newsize;
	int rv;
	uint16_t dsize;
	uint8_t newtype, newstripe;
	int help[2];
	locdesc_t *ldesc = (void *)help;	/* XXX */

	if (unit < 0 || unit >= TWE_MAX_UNITS)
		return (EINVAL);

	/* Find attached units. */
	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
	if (rv != 0) {
		aprint_error("%s: error %d fetching unit summary\n",
		    sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	/* For each detected unit, collect size and store in an array. */
	td = &sc->sc_units[unit];

	/* Unit present? */
	if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
		/*
		 * XXX Should we check to see if a device has been
		 * XXX attached at this index and detach it if it
		 * XXX has?  ("rescan" semantics)
		 */
		rv = 0;
		goto out;
	}

	rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
	if (rv != 0) {
		aprint_error("%s: error %d fetching descriptor size "
		    "for unit %d\n", sc->sc_dv.dv_xname, rv, unit);
		goto out;
	}

	rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
	if (rv != 0) {
		aprint_error("%s: error %d fetching array descriptor "
		    "for unit %d\n", sc->sc_dv.dv_xname, rv, unit);
		goto out;
	}

	ad = (struct twe_array_descriptor *)atp->tp_data;
	newtype = ad->configuration;
	newstripe = ad->stripe_size;
	free(atp, M_DEVBUF);

	rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_Capacity, &newsize);
	if (rv != 0) {
		aprint_error(
		    "%s: error %d fetching capacity for unit %d\n",
		    sc->sc_dv.dv_xname, rv, unit);
		goto out;
	}

	/*
	 * Have a device, so we need to attach it.  If there is currently
	 * something sitting at the slot, and the parameters are different,
	 * then we detach the old device before attaching the new one.
	 */
	if (td->td_dev != NULL &&
	    td->td_size == newsize &&
	    td->td_type == newtype &&
	    td->td_stripe == newstripe) {
		/* Same as the old device; just keep using it. */
		rv = 0;
		goto out;
	} else if (td->td_dev != NULL) {
		/* Detach the old device first. */
		(void) config_detach(td->td_dev, DETACH_FORCE);
		td->td_dev = NULL;
	} else if (td->td_size == 0)
		sc->sc_nunits++;

	/*
	 * Committed to the new array unit; assign its parameters and
	 * recompute the number of available command openings.
	 */
	td->td_size = newsize;
	td->td_type = newtype;
	td->td_stripe = newstripe;
	twe_recompute_openings(sc);

	twea.twea_unit = unit;

	ldesc->len = 1;
	ldesc->locs[TWECF_UNIT] = unit;

	td->td_dev = config_found_sm_loc(&sc->sc_dv, "twe", NULL, &twea,
	    twe_print, twe_submatch);

	rv = 0;
out:
	free(dtp, M_DEVBUF);
	return (rv);
}

static int
twe_del_unit(struct twe_softc *sc, int unit)
{
	struct twe_drive *td;

	if (unit < 0 || unit >= TWE_MAX_UNITS)
		return (EINVAL);

	td = &sc->sc_units[unit];
	if (td->td_size != 0)
		sc->sc_nunits--;
	td->td_size = 0;
	td->td_type = 0;
	td->td_stripe = 0;
	if (td->td_dev != NULL) {
		(void) config_detach(td->td_dev, DETACH_FORCE);
		td->td_dev = NULL;
	}
	twe_recompute_openings(sc);
	return (0);
}

/*
 * Reset the controller.
 * MUST BE CALLED AT splbio()!
 */
static int
twe_reset(struct twe_softc *sc)
{
	uint16_t aen;
	u_int status;
	volatile u_int32_t junk;
	int got, rv;

	/* Issue a soft reset. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	/* Wait for attention... */
	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) {
		printf("%s: timeout waiting for attention interrupt\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* ...and ACK it. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);

	/*
	 * Pull AENs out of the controller; look for a soft reset AEN.
	 * Open code this, since we want to detect reset even if the
	 * queue for management tools is full.
	 *
	 * Note that since:
	 *	- interrupts are blocked
	 *	- we have reset the controller
	 *	- acknowledged the pending ATTENTION
	 * that there is no way a pending asynchronous AEN fetch would
	 * finish, so clear the flag.
	 */
	sc->sc_flags &= ~TWEF_AEN;
	for (got = 0;;) {
		rv = twe_aen_get(sc, &aen);
		if (rv != 0)
			printf("%s: error %d while draining event queue\n",
			    sc->sc_dv.dv_xname, rv);
		if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
			break;
		if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
			got = 1;
		twe_aen_enqueue(sc, aen, 1);
	}

	if (!got) {
		printf("%s: reset not reported\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Check controller status. */
	status = twe_inl(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			printf("%s: can't drain response queue\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		junk = twe_inl(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Match a sub-device.
 */
static int
twe_submatch(struct device *parent, struct cfdata *cf,
    const locdesc_t *ldesc, void *aux)
{

	if (cf->cf_loc[TWECF_UNIT] != TWECF_UNIT_DEFAULT &&
	    cf->cf_loc[TWECF_UNIT] != ldesc->locs[TWECF_UNIT])
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught, rv;

	sc = arg;
	caught = 0;
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DEBUG
		printf("%s: host interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		rv = twe_aen_get(sc, NULL);
		if (rv != 0)
			printf("%s: unable to retrieve AEN (%d)\n",
			    sc->sc_dv.dv_xname, rv);
		else
			twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DEBUG
		printf("%s: command interrupt\n", sc->sc_dv.dv_xname);
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Fetch an AEN.  Even though this is really like parameter
 * retrieval, we handle this specially, because we issue this
 * AEN retrieval command from interrupt context, and thus
 * reserve a CCB for it to avoid resource shortage.
 *
 * XXX There are still potential resource shortages we could
 * XXX encounter.  Consider pre-allocating all AEN-related
 * XXX resources.
 *
 * MUST BE CALLED AT splbio()!
 */
static int
twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv;

	/*
	 * If we're already retrieving an AEN, just wait; another
	 * retrieval will be chained after the current one completes.
	 */
	if (sc->sc_flags & TWEF_AEN) {
		/*
		 * It is a fatal software programming error to attempt
		 * to fetch an AEN synchronously when an AEN fetch is
		 * already pending.
		 */
		KASSERT(aenp == NULL);
		return (0);
	}

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return (ENOMEM);

	ccb = twe_ccb_alloc(sc,
	    TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(TWE_PARAM_AEN);
	tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
	tp->tp_param_size = 2;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Enqueue the command and wait. */
	if (aenp != NULL) {
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		if (rv == 0)
			*aenp = le16toh(*(uint16_t *)tp->tp_data);
		free(tp, M_DEVBUF);
	} else {
		sc->sc_flags |= TWEF_AEN;
		twe_ccb_enqueue(sc, ccb);
		rv = 0;
	}

done:
	return (rv);
}

/*
 * Handle an AEN returned by the controller.
 * MUST BE CALLED AT splbio()!
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	uint16_t aen;
	int rv;

	sc = (struct twe_softc *)ccb->ccb_tx.tx_dv;
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	sc->sc_flags &= ~TWEF_AEN;

	if (error) {
		printf("%s: error retrieving AEN\n", sc->sc_dv.dv_xname);
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		return;
	}

	twe_aen_enqueue(sc, aen, 0);

	/*
	 * Chain another retrieval in case interrupts have been
	 * coalesced.
	 */
	rv = twe_aen_get(sc, NULL);
	if (rv != 0)
		printf("%s: unable to retrieve AEN (%d)\n",
		    sc->sc_dv.dv_xname, rv);
}

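/*
 * Illustrative summary (not from the original source) of how the pieces
 * above cooperate when the controller raises an attention interrupt:
 *
 *	twe_intr()			sees TWE_STS_ATTN_INTR
 *	  -> twe_aen_get(sc, NULL)	queues an asynchronous fetch on the
 *					reserved CCB and sets TWEF_AEN
 *	  -> twe_aen_handler()		runs on completion, enqueues the AEN
 *					for management tools and chains
 *					another twe_aen_get() in case events
 *					were coalesced
 *	  -> the chain stops when TWE_AEN_QUEUE_EMPTY is returned, at which
 *	     point TWE_CTL_CLEAR_ATTN_INTR is written.
 */
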
static void
twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
{
	const char *str, *msg;
	int s, next, nextnext, level;

	/*
	 * First report the AEN on the console.  Maybe.
	 */
	if (! quiet) {
		str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
		if (str == NULL) {
			printf("%s: unknown AEN 0x%04x\n",
			    sc->sc_dv.dv_xname, aen);
		} else {
			msg = str + 3;
			switch (str[1]) {
			case 'E':	level = LOG_EMERG;	break;
			case 'a':	level = LOG_ALERT;	break;
			case 'c':	level = LOG_CRIT;	break;
			case 'e':	level = LOG_ERR;	break;
			case 'w':	level = LOG_WARNING;	break;
			case 'n':	level = LOG_NOTICE;	break;
			case 'i':	level = LOG_INFO;	break;
			case 'd':	level = LOG_DEBUG;	break;
			default:
				/* Don't use syslog. */
				level = -1;
			}

			if (level < 0) {
				switch (str[0]) {
				case 'u':
				case 'p':
					printf("%s: %s %d: %s\n",
					    sc->sc_dv.dv_xname,
					    str[0] == 'u' ? "unit" : "port",
					    TWE_AEN_UNIT(aen), msg);
					break;

				default:
					printf("%s: %s\n",
					    sc->sc_dv.dv_xname, msg);
				}
			} else {
				switch (str[0]) {
				case 'u':
				case 'p':
					log(level, "%s: %s %d: %s\n",
					    sc->sc_dv.dv_xname,
					    str[0] == 'u' ? "unit" : "port",
					    TWE_AEN_UNIT(aen), msg);
					break;

				default:
					log(level, "%s: %s\n",
					    sc->sc_dv.dv_xname, msg);
				}
			}
		}
	}

	/* Now enqueue the AEN for management tools. */
	s = splbio();

	next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
	nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;

	/*
	 * If this is the last free slot, then queue up a "queue
	 * full" message.
	 */
	if (nextnext == sc->sc_aen_tail)
		aen = TWE_AEN_QUEUE_FULL;

	if (next != sc->sc_aen_tail) {
		sc->sc_aen_queue[sc->sc_aen_head] = aen;
		sc->sc_aen_head = next;
	}

	if (sc->sc_flags & TWEF_AENQ_WAIT) {
		sc->sc_flags &= ~TWEF_AENQ_WAIT;
		wakeup(&sc->sc_aen_queue);
	}

	splx(s);
}

/* NOTE: Must be called at splbio(). */
static uint16_t
twe_aen_dequeue(struct twe_softc *sc)
{
	uint16_t aen;

	if (sc->sc_aen_tail == sc->sc_aen_head)
		aen = TWE_AEN_QUEUE_EMPTY;
	else {
		aen = sc->sc_aen_queue[sc->sc_aen_tail];
		sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
	}

	return (aen);
}

/*
 * These are short-hand functions that execute TWE_OP_GET_PARAM to
 * fetch 1, 2, and 4 byte parameter values, respectively.
 */
int
twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
    uint8_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = *(uint8_t *)tp->tp_data;
	free(tp, M_DEVBUF);
	return (0);
}

int
twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
    uint16_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = le16toh(*(uint16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	return (0);
}

int
twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
    uint32_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = le32toh(*(uint32_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	return (0);
}

/*
 * Execute a TWE_OP_GET_PARAM command.  If a callback function is provided,
 * it will be called with the generated context when the command has
 * completed.  If no callback is provided, the command will be executed
 * synchronously and a pointer to a buffer containing the returned data
 * will be stored in *pbuf.
 *
 * The caller or callback is responsible for freeing the buffer.
 *
 * NOTE: We assume we can sleep here to wait for a CCB to become available.
 */
int
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
	} else {
#ifdef DEBUG
		if (pbuf != NULL)
			panic("both func and pbuf defined");
#endif
		twe_ccb_enqueue(sc, ccb);
		return 0;
	}

done:
	if (pbuf == NULL || rv != 0)
		free(tp, M_DEVBUF);
	else if (pbuf != NULL && rv == 0)
		*pbuf = tp;
	return rv;
}

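/*
 * Illustrative usage sketch (not from the original source).  The wrappers
 * above use the synchronous form; twe_describe_controller() below is a
 * real example of the same pattern:
 *
 *	struct twe_param *tp;
 *
 *	if (twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
 *	    16, NULL, &tp) == 0) {
 *		... use tp->tp_data ...
 *		free(tp, M_DEVBUF);
 *	}
 *
 * The asynchronous form passes a completion handler and a NULL pbuf; the
 * handler receives the CCB, finds the buffer in ccb->ccb_tx.tx_context,
 * and must unmap and free the CCB and free the buffer itself (see
 * twe_aen_handler() above).
 */
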
/*
 * Execute a TWE_OP_SET_PARAM command.
 *
 * NOTE: We assume we can sleep here to wait for a CCB to become available.
 */
static int
twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void *sbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = 0;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = &sc->sc_dv;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;
	memcpy(tp->tp_data, sbuf, size);

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and wait. */
	s = splbio();
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_unmap(sc, ccb);
	twe_ccb_free(sc, ccb);
	splx(s);
done:
	free(tp, M_DEVBUF);
	return (rv);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
		return (EAGAIN);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_MAX_QUEUECNT) {
			printf("%s: bad cmdid %d\n", sc->sc_dv.dv_xname, cmdid);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: CCB for cmdid %d not active\n",
			    sc->sc_dv.dv_xname, cmdid);
			continue;
		}
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

	for (timo *= 10; timo != 0; timo--) {
		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Clear a PCI parity error.
 */
static void
twe_clear_pci_parity_error(struct twe_softc *sc)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0,
	    TWE_CTL_CLEAR_PARITY_ERROR);

	/*
	 * FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS,
	 *     TWE_PCI_CLEAR_PARITY_ERROR, 2);
	 */
}

/*
 * Clear a PCI abort.
 */
static void
twe_clear_pci_abort(struct twe_softc *sc)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT);

	/*
	 * FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS,
	 *     TWE_PCI_CLEAR_PCI_ABORT, 2);
	 */
}

/*
 * Complain if the status bits aren't what we expect.
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		printf("%s: missing status bits: 0x%08x\n", sc->sc_dv.dv_xname,
		    status & ~TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		printf("%s: unexpected status bits: 0x%08x\n",
		    sc->sc_dv.dv_xname, status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
		if (status & TWE_STS_PCI_PARITY_ERROR) {
			printf("%s: PCI parity error: Reseat card, move card "
			    "or buggy device present.\n",
			    sc->sc_dv.dv_xname);
			twe_clear_pci_parity_error(sc);
		}
		if (status & TWE_STS_PCI_ABORT) {
			printf("%s: PCI abort, clearing.\n",
			    sc->sc_dv.dv_xname);
			twe_clear_pci_abort(sc);
		}
	}

	return (rv);
}

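/*
 * Overview added for clarity (not from the original source): the CCB
 * helpers below are used in a fixed sequence.  A caller obtains a CCB with
 * twe_ccb_alloc() or twe_ccb_alloc_wait(), points ccb_data and ccb_datasize
 * at its buffer, maps it with twe_ccb_map(), and then either polls with
 * twe_ccb_poll() or queues it with twe_ccb_enqueue() and lets the
 * completion handler run from twe_poll().  Completion is followed by
 * twe_ccb_unmap() and twe_ccb_free().  A rough sketch of the synchronous
 * pattern, as used by twe_param_set() above:
 *
 *	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
 *	ccb->ccb_data = buf;
 *	ccb->ccb_datasize = TWE_SECTOR_SIZE;
 *	if (twe_ccb_map(sc, ccb) == 0) {
 *		s = splbio();
 *		rv = twe_ccb_poll(sc, ccb, 5);
 *		twe_ccb_unmap(sc, ccb);
 *		twe_ccb_free(sc, ccb);
 *		splx(s);
 *	}
 */
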
/*
 * Allocate and initialise a CCB.
 */
static __inline void
twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
{
	struct twe_cmd *tc;

	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
}

struct twe_ccb *
twe_ccb_alloc(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	s = splbio();
	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
		/* Use the reserved CCB. */
		ccb = sc->sc_ccbs;
	} else {
		/* Allocate a CCB and command block. */
		if (__predict_false((ccb =
		    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
			splx(s);
			return (NULL);
		}
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}

struct twe_ccb *
twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	KASSERT((flags & TWE_CCB_AEN) == 0);

	s = splbio();
	while (__predict_false((ccb =
	    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
		sc->sc_flags |= TWEF_WAIT_CCB;
		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
	}
	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
		if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
			sc->sc_flags &= ~TWEF_WAIT_CCB;
			wakeup(&sc->sc_ccb_freelist);
		}
	}
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s, rv;
	void *data;

	/*
	 * The data as a whole must be 512-byte aligned.
	 */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splvm();
		/* XXX */
		ccb->ccb_abuf = uvm_km_alloc(kmem_map,
		    ccb->ccb_datasize, 0, UVM_KMF_NOWAIT | UVM_KMF_WIRED);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/*
	 * Map the data buffer into bus space and build the S/G list.
	 */
	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0) {
		if (ccb->ccb_abuf != (vaddr_t)0) {
			s = splvm();
			/* XXX */
			uvm_km_free(kmem_map, ccb->ccb_abuf,
			    ccb->ccb_datasize, UVM_KMF_WIRED);
			splx(s);
		}
		return (rv);
	}

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
	default:
		/*
		 * In all likelihood, this is a command passed from
		 * management tools in userspace where no S/G list is
		 * necessary because no data is being passed.
		 */
		break;
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize,
		    UVM_KMF_WIRED);
		splx(s);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout (but don't check status, as some command types don't
 * return status).  Must be called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{
	int rv;

	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
		return (rv);

	for (timo *= 1000; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (caddr_t)ccb->ccb_cmd - sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
#ifdef DIAGNOSTIC
		if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
			panic("%s: CCB %ld not ALLOCED\n",
			    sc->sc_dv.dv_xname, (long)(ccb - sc->sc_ccbs));
#endif
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}


/*
 * Accept an open operation on the control device.
 */
static int
tweopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct twe_softc *twe;

	if ((twe = device_lookup(&twe_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((twe->sc_flags & TWEF_OPEN) != 0)
		return (EBUSY);

	twe->sc_flags |= TWEF_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
static int
tweclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct twe_softc *twe;

	twe = device_lookup(&twe_cd, minor(dev));
	twe->sc_flags &= ~TWEF_OPEN;
	return (0);
}

void
twe_ccb_wait_handler(struct twe_ccb *ccb, int error)
{

	/* Just wake up the sleeper. */
	wakeup(ccb);
}

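/*
 * Illustrative sketch (not from the original source) of how a userland
 * management tool is expected to drive the control device below.  The
 * structures come from <dev/pci/tweio.h>; the device node name used here
 * is an assumption made for the example:
 *
 *	int fd = open("/dev/twe0", O_RDWR);	hypothetical node name
 *	u_int aen;
 *
 *	if (ioctl(fd, TWEIO_AEN_POLL, &aen) == 0 &&
 *	    aen != TWE_AEN_QUEUE_EMPTY)
 *		... handle the event ...
 *
 * TWEIO_COMMAND works the same way with a struct twe_usercommand, whose
 * tu_cmd is copied into a CCB (with the command ID rewritten) and whose
 * tu_data/tu_size describe an optional transfer buffer of at most
 * TWE_SECTOR_SIZE bytes.
 */
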
/*
 * Handle control operations.
 */
static int
tweioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct twe_softc *twe;
	struct twe_ccb *ccb;
	struct twe_param *param;
	struct twe_usercommand *tu;
	struct twe_paramcommand *tp;
	struct twe_drivecommand *td;
	void *pdata = NULL;
	int s, error = 0;
	u_int8_t cmdid;

	if (securelevel >= 2)
		return (EPERM);

	twe = device_lookup(&twe_cd, minor(dev));
	tu = (struct twe_usercommand *)data;
	tp = (struct twe_paramcommand *)data;
	td = (struct twe_drivecommand *)data;

	/* This is intended to be compatible with the FreeBSD interface. */
	switch (cmd) {
	case TWEIO_COMMAND:
		/* XXX mutex */
		if (tu->tu_size > 0) {
			/*
			 * XXX Handle > TWE_SECTOR_SIZE?  Let's see if
			 * it's really necessary, first.
			 */
			if (tu->tu_size > TWE_SECTOR_SIZE) {
#ifdef TWE_DEBUG
				printf("%s: TWEIO_COMMAND: tu_size = %d\n",
				    twe->sc_dv.dv_xname, tu->tu_size);
#endif
				return EINVAL;
			}
			pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
			error = copyin(tu->tu_data, pdata, tu->tu_size);
			if (error != 0)
				goto done;
			ccb = twe_ccb_alloc_wait(twe,
			    TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
			KASSERT(ccb != NULL);
			ccb->ccb_data = pdata;
			ccb->ccb_datasize = TWE_SECTOR_SIZE;
		} else {
			ccb = twe_ccb_alloc_wait(twe, 0);
			KASSERT(ccb != NULL);
		}

		ccb->ccb_tx.tx_handler = twe_ccb_wait_handler;
		ccb->ccb_tx.tx_context = NULL;
		ccb->ccb_tx.tx_dv = &twe->sc_dv;

		cmdid = ccb->ccb_cmdid;
		memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
		ccb->ccb_cmd->tc_cmdid = cmdid;

		/* Map the transfer. */
		if ((error = twe_ccb_map(twe, ccb)) != 0) {
			twe_ccb_free(twe, ccb);
			goto done;
		}

		/* Submit the command and wait up to 1 minute. */
		error = 0;
		twe_ccb_enqueue(twe, ccb);
		s = splbio();
		while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
			if ((error = tsleep(ccb, PRIBIO, "tweioctl",
			    60 * hz)) != 0)
				break;
		splx(s);

		/* Copy the command back to the ioctl argument. */
		memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
#ifdef TWE_DEBUG
		printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
		    "tc_status = 0x%02x\n", twe->sc_dv.dv_xname,
		    tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
#endif

		s = splbio();
		twe_ccb_free(twe, ccb);
		splx(s);

		if (tu->tu_size > 0)
			error = copyout(pdata, tu->tu_data, tu->tu_size);
		goto done;

	case TWEIO_STATS:
		return (ENOENT);

	case TWEIO_AEN_POLL:
		s = splbio();
		*(u_int *)data = twe_aen_dequeue(twe);
		splx(s);
		return (0);

	case TWEIO_AEN_WAIT:
		s = splbio();
		while ((*(u_int *)data =
		    twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
			twe->sc_flags |= TWEF_AENQ_WAIT;
			error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
			    "tweaen", 0);
			if (error == EINTR) {
				splx(s);
				return (error);
			}
		}
		splx(s);
		return (0);

	case TWEIO_GET_PARAM:
		error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, 0, &param);
		if (error != 0)
			return (error);
		if (param->tp_param_size > tp->tp_size) {
			error = EFAULT;
			goto done;
		}
		error = copyout(param->tp_data, tp->tp_data,
		    param->tp_param_size);
		goto done;

	case TWEIO_SET_PARAM:
		pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
		if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
			goto done;
		error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, pdata);
		goto done;

	case TWEIO_RESET:
		s = splbio();
		twe_reset(twe);
		splx(s);
		return (0);

	case TWEIO_ADD_UNIT:
		/* XXX mutex */
		return (twe_add_unit(twe, td->td_unit));

	case TWEIO_DEL_UNIT:
		/* XXX mutex */
		return (twe_del_unit(twe, td->td_unit));

	default:
		return EINVAL;
	}
done:
	if (pdata)
		free(pdata, M_DEVBUF);
	return error;
}

const struct cdevsw twe_cdevsw = {
	tweopen, tweclose, noread, nowrite, tweioctl,
	nostop, notty, nopoll, nommap,
};

/*
 * Print some information about the controller
 */
static void
twe_describe_controller(struct twe_softc *sc)
{
	struct twe_param *p[6];
	int i, rv = 0;
	uint32_t dsize;
	uint8_t ports;

	/* get the port count */
	rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
	    TWE_PARAM_CONTROLLER_PortCount, &ports);

	/* get version strings */
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
	    16, NULL, &p[0]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
	    16, NULL, &p[1]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
	    16, NULL, &p[2]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
	    8, NULL, &p[3]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
	    8, NULL, &p[4]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
	    8, NULL, &p[5]);

	if (rv) {
		/* some error occurred */
		aprint_error("%s: failed to fetch version information\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	aprint_normal("%s: %d ports, Firmware %.16s, BIOS %.16s\n",
	    sc->sc_dv.dv_xname, ports,
	    p[1]->tp_data, p[2]->tp_data);

	aprint_verbose("%s: Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
	    sc->sc_dv.dv_xname,
	    p[0]->tp_data, p[3]->tp_data,
	    p[4]->tp_data, p[5]->tp_data);

	free(p[0], M_DEVBUF);
	free(p[1], M_DEVBUF);
	free(p[2], M_DEVBUF);
	free(p[3], M_DEVBUF);
	free(p[4], M_DEVBUF);
	free(p[5], M_DEVBUF);

	rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
	    TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
	if (rv) {
		aprint_error("%s: failed to get drive status summary\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	for (i = 0; i < ports; i++) {
		if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
			continue;
		rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Size, &dsize);
		if (rv) {
			aprint_error(
			    "%s: unable to get drive size for port %d\n",
			    sc->sc_dv.dv_xname, i);
			continue;
		}
		rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
		if (rv) {
			aprint_error(
			    "%s: unable to get drive model for port %d\n",
			    sc->sc_dv.dv_xname, i);
			continue;
		}
		aprint_verbose("%s: port %d: %.40s %d MB\n",
		    sc->sc_dv.dv_xname, i, p[1]->tp_data, dsize / 2048);
		free(p[1], M_DEVBUF);
	}
	free(p[0], M_DEVBUF);
}