/*	$NetBSD: twe.c,v 1.107 2018/12/09 11:14:02 jdolecek Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2003, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran; and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: twe.c,v 1.1 2000/05/24 23:35:23 msmith Exp
 */

/*
 * Driver for the 3ware Escalade family of RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: twe.c,v 1.107 2018/12/09 11:14:02 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/kauth.h>
#include <sys/module.h>
#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/twereg.h>
#include <dev/pci/twevar.h>
#include <dev/pci/tweio.h>

#include "locators.h"
#include "ioconf.h"

#define	PCI_CBIO	0x10

static int	twe_aen_get(struct twe_softc *, uint16_t *);
static void	twe_aen_handler(struct twe_ccb *, int);
static void	twe_aen_enqueue(struct twe_softc *sc, uint16_t, int);
static uint16_t	twe_aen_dequeue(struct twe_softc *);

static void	twe_attach(device_t, device_t, void *);
static int	twe_rescan(device_t, const char *, const int *);
static int	twe_init_connection(struct twe_softc *);
static int	twe_intr(void *);
static int	twe_match(device_t, cfdata_t, void *);
static int	twe_param_set(struct twe_softc *, int, int, size_t, void *);
static void	twe_poll(struct twe_softc *);
static int	twe_print(void *, const char *);
static int	twe_reset(struct twe_softc *);
static int	twe_status_check(struct twe_softc *, u_int);
static int	twe_status_wait(struct twe_softc *, u_int, int);
static void	twe_describe_controller(struct twe_softc *);
static void	twe_clear_pci_abort(struct twe_softc *sc);
static void	twe_clear_pci_parity_error(struct twe_softc *sc);

static int	twe_add_unit(struct twe_softc *, int);
static int	twe_del_unit(struct twe_softc *, int);
static int	twe_init_connection(struct twe_softc *);

static inline u_int32_t	twe_inl(struct twe_softc *, int);
static inline void	twe_outl(struct twe_softc *, int, u_int32_t);

extern struct cfdriver twe_cd;

CFATTACH_DECL3_NEW(twe, sizeof(struct twe_softc),
    twe_match, twe_attach, NULL, NULL, twe_rescan, NULL, 0);

/* FreeBSD driver revision for sysctl expected by the 3ware cli */
const char twever[] = "1.50.01.002";

/*
 * Tables to convert numeric codes to strings.
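 *
 * Each table is terminated by an entry with a NULL string; twe_describe_code()
 * below scans a table linearly and returns the matching string, or NULL if
 * the code is unknown (for example, looking up TWE_PARAM_UNITSTATUS_Degraded
 * in twe_table_unitstate yields "Degraded").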
 */
const struct twe_code_table twe_table_status[] = {
	{ 0x00, "successful completion" },

	/* info */
	{ 0x42, "command in progress" },
	{ 0x6c, "retrying interface CRC error from UDMA command" },

	/* warning */
	{ 0x81, "redundant/inconsequential request ignored" },
	{ 0x8e, "failed to write zeroes to LBA 0" },
	{ 0x8f, "failed to profile TwinStor zones" },

	/* fatal */
	{ 0xc1, "aborted due to system command or reconfiguration" },
	{ 0xc4, "aborted" },
	{ 0xc5, "access error" },
	{ 0xc6, "access violation" },
	{ 0xc7, "device failure" },	/* high byte may be port # */
	{ 0xc8, "controller error" },
	{ 0xc9, "timed out" },
	{ 0xcb, "invalid unit number" },
	{ 0xcf, "unit not available" },
	{ 0xd2, "undefined opcode" },
	{ 0xdb, "request incompatible with unit" },
	{ 0xdc, "invalid request" },
	{ 0xff, "firmware error, reset requested" },

	{ 0,	NULL }
};

const struct twe_code_table twe_table_unitstate[] = {
	{ TWE_PARAM_UNITSTATUS_Normal,		"Normal" },
	{ TWE_PARAM_UNITSTATUS_Initialising,	"Initializing" },
	{ TWE_PARAM_UNITSTATUS_Degraded,	"Degraded" },
	{ TWE_PARAM_UNITSTATUS_Rebuilding,	"Rebuilding" },
	{ TWE_PARAM_UNITSTATUS_Verifying,	"Verifying" },
	{ TWE_PARAM_UNITSTATUS_Corrupt,		"Corrupt" },
	{ TWE_PARAM_UNITSTATUS_Missing,		"Missing" },

	{ 0,					NULL }
};

const struct twe_code_table twe_table_unittype[] = {
	/* array descriptor configuration */
	{ TWE_AD_CONFIG_RAID0,			"RAID0" },
	{ TWE_AD_CONFIG_RAID1,			"RAID1" },
	{ TWE_AD_CONFIG_TwinStor,		"TwinStor" },
	{ TWE_AD_CONFIG_RAID5,			"RAID5" },
	{ TWE_AD_CONFIG_RAID10,			"RAID10" },
	{ TWE_UD_CONFIG_JBOD,			"JBOD" },

	{ 0,					NULL }
};

const struct twe_code_table twe_table_stripedepth[] = {
	{ TWE_AD_STRIPE_4k,	"4K" },
	{ TWE_AD_STRIPE_8k,	"8K" },
	{ TWE_AD_STRIPE_16k,	"16K" },
	{ TWE_AD_STRIPE_32k,	"32K" },
	{ TWE_AD_STRIPE_64k,	"64K" },
	{ TWE_AD_STRIPE_128k,	"128K" },
	{ TWE_AD_STRIPE_256k,	"256K" },
	{ TWE_AD_STRIPE_512k,	"512K" },
	{ TWE_AD_STRIPE_1024k,	"1024K" },

	{ 0,			NULL }
};

/*
 * Asynchronous event notification messages are qualified:
 *	a - not unit/port specific
 *	u - unit specific
 *	p - port specific
 *
 * They are further qualified with a severity:
 *	E - LOG_EMERG
 *	a - LOG_ALERT
 *	c - LOG_CRIT
 *	e - LOG_ERR
 *	w - LOG_WARNING
 *	n - LOG_NOTICE
 *	i - LOG_INFO
 *	d - LOG_DEBUG
 *	blank - just use printf
 */
const struct twe_code_table twe_table_aen[] = {
	{ 0x00,	"a  queue empty" },
	{ 0x01,	"a  soft reset" },
	{ 0x02,	"uc degraded mode" },
	{ 0x03,	"aa controller error" },
	{ 0x04,	"uE rebuild fail" },
	{ 0x05,	"un rebuild done" },
	{ 0x06,	"ue incomplete unit" },
	{ 0x07,	"un initialization done" },
	{ 0x08,	"uw unclean shutdown detected" },
	{ 0x09,	"pe drive timeout" },
	{ 0x0a,	"pc drive error" },
	{ 0x0b,	"un rebuild started" },
	{ 0x0c,	"un initialization started" },
	{ 0x0d,	"ui logical unit deleted" },
	{ 0x0f,	"pc SMART threshold exceeded" },
	{ 0x15,	"a  table undefined" },	/* XXX: Not in FreeBSD's table */
	{ 0x21,	"pe ATA UDMA downgrade" },
	{ 0x22,	"pi ATA UDMA upgrade" },
	{ 0x23,	"pw sector repair occurred" },
	{ 0x24,	"aa SBUF integrity check failure" },
	{ 0x25,	"pa lost cached write" },
	{ 0x26,	"pa drive ECC error detected" },
	{ 0x27,	"pe DCB checksum error" },
	{ 0x28,	"pn DCB unsupported version" },
	{ 0x29,	"ui verify started" },
	{ 0x2a,	"ua verify failed" },
	{ 0x2b,	"ui verify complete" },
	{ 0x2c,	"pw overwrote bad sector during rebuild" },
	{ 0x2d,	"pa encountered bad sector during rebuild" },
	{ 0x2e,	"pe replacement drive too small" },
	{ 0x2f,	"ue array not previously initialized" },
	{ 0x30,	"p  drive not supported" },
	{ 0xff,	"a  aen queue full" },

	{ 0,	NULL },
};

const char *
twe_describe_code(const struct twe_code_table *table, uint32_t code)
{

	for (; table->string != NULL; table++) {
		if (table->code == code)
			return (table->string);
	}
	return (NULL);
}

static inline u_int32_t
twe_inl(struct twe_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
twe_outl(struct twe_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Match a supported board.
 */
static int
twe_match(device_t parent, cfdata_t cfdata, void *aux)
{
	struct pci_attach_args *pa;

	pa = aux;

	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_3WARE &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_3WARE_ESCALADE_ASIC));
}

/*
 * Attach a supported board.
 *
 * XXX This doesn't fail gracefully.
 */
static void
twe_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct twe_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	pcireg_t csr;
	const char *intrstr;
	int s, size, i, rv, rseg;
	size_t max_segs, max_xfer;
	bus_dma_segment_t seg;
	const struct sysctlnode *node;
	struct twe_cmd *tc;
	struct twe_ccb *ccb;
	char intrbuf[PCI_INTRSTR_LEN];

	sc = device_private(self);
	sc->sc_dev = self;
	pa = aux;
	pc = pa->pa_pc;
	sc->sc_dmat = pa->pa_dmat;
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	SLIST_INIT(&sc->sc_ccb_freelist);

	aprint_naive(": RAID controller\n");
	aprint_normal(": 3ware Escalade\n");

	if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, NULL)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	/* Enable the device. */
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, twe_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt%s%s\n",
		    (intrstr) ? " at " : "",
		    (intrstr) ? intrstr : "");
		return;
	}

	if (intrstr != NULL)
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Allocate and initialise the command blocks and CCBs.
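	 *
	 * The command blocks are kept in one contiguous DMA-safe region of
	 * TWE_MAX_QUEUECNT struct twe_cmd entries and are paired with the
	 * CCBs by index.  CCB 0 is deliberately kept off the free list so
	 * that it is always available for AEN retrieval from interrupt
	 * context.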
	 */
	size = sizeof(struct twe_cmd) * TWE_MAX_QUEUECNT;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "unable to allocate commands, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_cmds,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "unable to map commands, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, size, 1, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self,
		    "unable to create command DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_cmds,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "unable to load command DMA map, rv = %d\n", rv);
		return;
	}

	ccb = malloc(sizeof(*ccb) * TWE_MAX_QUEUECNT, M_DEVBUF, M_NOWAIT);
	if (ccb == NULL) {
		aprint_error_dev(self, "unable to allocate memory for ccbs\n");
		return;
	}

	sc->sc_cmds_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_cmds, 0, size);

	sc->sc_ccbs = ccb;
	tc = (struct twe_cmd *)sc->sc_cmds;
	max_segs = twe_get_maxsegs();
	max_xfer = twe_get_maxxfer(max_segs);

	for (i = 0; i < TWE_MAX_QUEUECNT; i++, tc++, ccb++) {
		ccb->ccb_cmd = tc;
		ccb->ccb_cmdid = i;
		ccb->ccb_flags = 0;
		rv = bus_dmamap_create(sc->sc_dmat, max_xfer,
		    max_segs, PAGE_SIZE, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv != 0) {
			aprint_error_dev(self,
			    "can't create dmamap, rv = %d\n", rv);
			return;
		}

		/* Save the first CCB for AEN retrieval. */
		if (i != 0)
			SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb,
			    ccb_chain.slist);
	}

	/* Wait for the controller to become ready. */
	if (twe_status_wait(sc, TWE_STS_MICROCONTROLLER_READY, 6)) {
		aprint_error_dev(self, "microcontroller not ready\n");
		return;
	}

	twe_outl(sc, TWE_REG_CTL, TWE_CTL_DISABLE_INTRS);

	/* Reset the controller. */
	s = splbio();
	rv = twe_reset(sc);
	splx(s);
	if (rv) {
		aprint_error_dev(self, "reset failed\n");
		return;
	}

	/* Initialise connection with controller. */
	twe_init_connection(sc);

	twe_describe_controller(sc);

	/* Find and attach RAID array units. */
	twe_rescan(self, "twe", 0);

	/* ...and finally, enable interrupts. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_UNMASK_RESP_INTR |
	    TWE_CTL_ENABLE_INTRS);

	/* sysctl set-up for 3ware cli */
	if (sysctl_createv(NULL, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(self),
	    SYSCTL_DESCR("twe driver information"),
	    NULL, 0, NULL, 0,
	    CTL_HW, CTL_CREATE, CTL_EOL) != 0) {
		aprint_error_dev(self, "could not create %s.%s sysctl node\n",
		    "hw", device_xname(self));
		return;
	}
	if ((i = sysctl_createv(NULL, 0, NULL, NULL,
	    0, CTLTYPE_STRING, "driver_version",
	    SYSCTL_DESCR("twe0 driver version"),
	    NULL, 0, __UNCONST(&twever), 0,
	    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL))
	    != 0) {
		aprint_error_dev(self,
		    "could not create %s.%s.driver_version sysctl\n",
		    "hw", device_xname(self));
		return;
	}
}

static int
twe_rescan(device_t self, const char *attr, const int *flags)
{
	struct twe_softc *sc;
	int i;

	sc = device_private(self);
	sc->sc_nunits = 0;
	for (i = 0; i < TWE_MAX_UNITS; i++)
		(void) twe_add_unit(sc, i);
	return 0;
}

void
twe_register_callbacks(struct twe_softc *sc, int unit,
    const struct twe_callbacks *tcb)
{

	sc->sc_units[unit].td_callbacks = tcb;
}

static void
twe_recompute_openings(struct twe_softc *sc)
{
	struct twe_drive *td;
	int unit, openings;

	if (sc->sc_nunits != 0)
		openings = (TWE_MAX_QUEUECNT - 1) / sc->sc_nunits;
	else
		openings = 0;
	if (openings == sc->sc_openings)
		return;
	sc->sc_openings = openings;

#ifdef TWE_DEBUG
	printf("%s: %d array%s, %d openings per array\n",
	    device_xname(sc->sc_dev), sc->sc_nunits,
	    sc->sc_nunits == 1 ? "" : "s", sc->sc_openings);
#endif

	for (unit = 0; unit < TWE_MAX_UNITS; unit++) {
		td = &sc->sc_units[unit];
		if (td->td_dev != NULL)
			(*td->td_callbacks->tcb_openings)(td->td_dev,
			    sc->sc_openings);
	}
}

static int
twe_add_unit(struct twe_softc *sc, int unit)
{
	struct twe_param *dtp, *atp;
	struct twe_array_descriptor *ad;
	struct twe_drive *td;
	struct twe_attach_args twea;
	uint32_t newsize;
	int rv;
	uint16_t dsize;
	uint8_t newtype, newstripe;
	int locs[TWECF_NLOCS];

	if (unit < 0 || unit >= TWE_MAX_UNITS)
		return (EINVAL);

	/* Find attached units. */
	rv = twe_param_get(sc, TWE_PARAM_UNITSUMMARY,
	    TWE_PARAM_UNITSUMMARY_Status, TWE_MAX_UNITS, NULL, &dtp);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "error %d fetching unit summary\n", rv);
		return (rv);
	}

	/* For each detected unit, collect size and store in an array. */
	td = &sc->sc_units[unit];

	/* Unit present? */
	if ((dtp->tp_data[unit] & TWE_PARAM_UNITSTATUS_Online) == 0) {
		/*
		 * XXX Should we check to see if a device has been
		 * XXX attached at this index and detach it if it
		 * XXX has?  ("rescan" semantics)
		 */
		rv = 0;
		goto out;
	}

	rv = twe_param_get_2(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_DescriptorSize, &dsize);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "error %d fetching descriptor size for unit %d\n",
		    rv, unit);
		goto out;
	}

	rv = twe_param_get(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_Descriptor, dsize - 3, NULL, &atp);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "error %d fetching array descriptor for unit %d\n",
		    rv, unit);
		goto out;
	}

	ad = (struct twe_array_descriptor *)atp->tp_data;
	newtype = ad->configuration;
	newstripe = ad->stripe_size;
	free(atp, M_DEVBUF);

	rv = twe_param_get_4(sc, TWE_PARAM_UNITINFO + unit,
	    TWE_PARAM_UNITINFO_Capacity, &newsize);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "error %d fetching capacity for unit %d\n",
		    rv, unit);
		goto out;
	}

	/*
	 * Have a device, so we need to attach it.  If there is currently
	 * something sitting at the slot, and the parameters are different,
	 * then we detach the old device before attaching the new one.
	 */
	if (td->td_dev != NULL &&
	    td->td_size == newsize &&
	    td->td_type == newtype &&
	    td->td_stripe == newstripe) {
		/* Same as the old device; just keep using it. */
		rv = 0;
		goto out;
	} else if (td->td_dev != NULL) {
		/* Detach the old device first. */
		(void) config_detach(td->td_dev, DETACH_FORCE);
		td->td_dev = NULL;
	} else if (td->td_size == 0)
		sc->sc_nunits++;

	/*
	 * Committed to the new array unit; assign its parameters and
	 * recompute the number of available command openings.
	 */
	td->td_size = newsize;
	td->td_type = newtype;
	td->td_stripe = newstripe;
	twe_recompute_openings(sc);

	twea.twea_unit = unit;

	locs[TWECF_UNIT] = unit;

	td->td_dev = config_found_sm_loc(sc->sc_dev, "twe", locs, &twea,
	    twe_print, config_stdsubmatch);

	rv = 0;
 out:
	free(dtp, M_DEVBUF);
	return (rv);
}

static int
twe_del_unit(struct twe_softc *sc, int unit)
{
	struct twe_drive *td;

	if (unit < 0 || unit >= TWE_MAX_UNITS)
		return (EINVAL);

	td = &sc->sc_units[unit];
	if (td->td_size != 0)
		sc->sc_nunits--;
	td->td_size = 0;
	td->td_type = 0;
	td->td_stripe = 0;
	if (td->td_dev != NULL) {
		(void) config_detach(td->td_dev, DETACH_FORCE);
		td->td_dev = NULL;
	}
	twe_recompute_openings(sc);
	return (0);
}

/*
 * Reset the controller.
 * MUST BE CALLED AT splbio()!
 */
static int
twe_reset(struct twe_softc *sc)
{
	uint16_t aen;
	u_int status;
	int got, rv;

	/* Issue a soft reset. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_ISSUE_SOFT_RESET |
	    TWE_CTL_CLEAR_HOST_INTR |
	    TWE_CTL_CLEAR_ATTN_INTR |
	    TWE_CTL_MASK_CMD_INTR |
	    TWE_CTL_MASK_RESP_INTR |
	    TWE_CTL_CLEAR_ERROR_STS |
	    TWE_CTL_DISABLE_INTRS);

	/* Wait for attention... */
	if (twe_status_wait(sc, TWE_STS_ATTN_INTR, 30)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for attention interrupt\n");
		return (-1);
	}

	/* ...and ACK it. */
	twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);

	/*
	 * Pull AENs out of the controller; look for a soft reset AEN.
	 * Open code this, since we want to detect reset even if the
	 * queue for management tools is full.
	 *
	 * Note that since:
	 *	- interrupts are blocked
	 *	- we have reset the controller
	 *	- acknowledged the pending ATTENTION
	 * that there is no way a pending asynchronous AEN fetch would
	 * finish, so clear the flag.
	 */
	sc->sc_flags &= ~TWEF_AEN;
	for (got = 0;;) {
		rv = twe_aen_get(sc, &aen);
		if (rv != 0)
			printf("%s: error %d while draining event queue\n",
			    device_xname(sc->sc_dev), rv);
		if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY)
			break;
		if (TWE_AEN_CODE(aen) == TWE_AEN_SOFT_RESET)
			got = 1;
		twe_aen_enqueue(sc, aen, 1);
	}

	if (!got) {
		printf("%s: reset not reported\n", device_xname(sc->sc_dev));
		return (-1);
	}

	/* Check controller status. */
	status = twe_inl(sc, TWE_REG_STS);
	if (twe_status_check(sc, status)) {
		printf("%s: controller errors detected\n",
		    device_xname(sc->sc_dev));
		return (-1);
	}

	/* Drain the response queue. */
	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		if (twe_status_check(sc, status) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't drain response queue\n");
			return (-1);
		}
		if ((status & TWE_STS_RESP_QUEUE_EMPTY) != 0)
			break;
		(void)twe_inl(sc, TWE_REG_RESP_QUEUE);
	}

	return (0);
}

/*
 * Print autoconfiguration message for a sub-device.
 */
static int
twe_print(void *aux, const char *pnp)
{
	struct twe_attach_args *twea;

	twea = aux;

	if (pnp != NULL)
		aprint_normal("block device at %s", pnp);
	aprint_normal(" unit %d", twea->twea_unit);
	return (UNCONF);
}

/*
 * Interrupt service routine.
 */
static int
twe_intr(void *arg)
{
	struct twe_softc *sc;
	u_int status;
	int caught, rv;

	sc = arg;
	caught = 0;
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	/* Host interrupts - purpose unknown. */
	if ((status & TWE_STS_HOST_INTR) != 0) {
#ifdef DEBUG
		printf("%s: host interrupt\n", device_xname(sc->sc_dev));
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_HOST_INTR);
		caught = 1;
	}

	/*
	 * Attention interrupts, signalled when a controller or child device
	 * state change has occurred.
	 */
	if ((status & TWE_STS_ATTN_INTR) != 0) {
		rv = twe_aen_get(sc, NULL);
		if (rv != 0)
			aprint_error_dev(sc->sc_dev,
			    "unable to retrieve AEN (%d)\n", rv);
		else
			twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		caught = 1;
	}

	/*
	 * Command interrupts, signalled when the controller can accept more
	 * commands.  We don't use this; instead, we try to submit commands
	 * when we receive them, and when other commands have completed.
	 * Mask it so we don't get another one.
	 */
	if ((status & TWE_STS_CMD_INTR) != 0) {
#ifdef DEBUG
		printf("%s: command interrupt\n", device_xname(sc->sc_dev));
#endif
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_MASK_CMD_INTR);
		caught = 1;
	}

	if ((status & TWE_STS_RESP_INTR) != 0) {
		twe_poll(sc);
		caught = 1;
	}

	return (caught);
}

/*
 * Fetch an AEN.  Even though this is really like parameter
 * retrieval, we handle this specially, because we issue this
 * AEN retrieval command from interrupt context, and thus
 * reserve a CCB for it to avoid resource shortage.
 *
 * XXX There are still potential resource shortages we could
 * XXX encounter.  Consider pre-allocating all AEN-related
 * XXX resources.
 *
 * MUST BE CALLED AT splbio()!
 */
static int
twe_aen_get(struct twe_softc *sc, uint16_t *aenp)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv;

	/*
	 * If we're already retrieving an AEN, just wait; another
	 * retrieval will be chained after the current one completes.
	 */
	if (sc->sc_flags & TWEF_AEN) {
		/*
		 * It is a fatal software programming error to attempt
		 * to fetch an AEN synchronously when an AEN fetch is
		 * already pending.
		 */
		KASSERT(aenp == NULL);
		return (0);
	}

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return (ENOMEM);

	ccb = twe_ccb_alloc(sc,
	    TWE_CCB_AEN | TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = (aenp == NULL) ? twe_aen_handler : NULL;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = sc->sc_dev;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(TWE_PARAM_AEN);
	tp->tp_param_id = TWE_PARAM_AEN_UnitCode;
	tp->tp_param_size = 2;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Enqueue the command and wait. */
	if (aenp != NULL) {
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		if (rv == 0)
			*aenp = le16toh(*(uint16_t *)tp->tp_data);
		free(tp, M_DEVBUF);
	} else {
		sc->sc_flags |= TWEF_AEN;
		twe_ccb_enqueue(sc, ccb);
		rv = 0;
	}

 done:
	return (rv);
}

/*
 * Handle an AEN returned by the controller.
 * MUST BE CALLED AT splbio()!
 */
static void
twe_aen_handler(struct twe_ccb *ccb, int error)
{
	struct twe_softc *sc;
	struct twe_param *tp;
	uint16_t aen;
	int rv;

	sc = device_private(ccb->ccb_tx.tx_dv);
	tp = ccb->ccb_tx.tx_context;
	twe_ccb_unmap(sc, ccb);

	sc->sc_flags &= ~TWEF_AEN;

	if (error) {
		aprint_error_dev(sc->sc_dev, "error retrieving AEN\n");
		aen = TWE_AEN_QUEUE_EMPTY;
	} else
		aen = le16toh(*(u_int16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	twe_ccb_free(sc, ccb);

	if (TWE_AEN_CODE(aen) == TWE_AEN_QUEUE_EMPTY) {
		twe_outl(sc, TWE_REG_CTL, TWE_CTL_CLEAR_ATTN_INTR);
		return;
	}

	twe_aen_enqueue(sc, aen, 0);

	/*
	 * Chain another retrieval in case interrupts have been
	 * coalesced.
	 */
	rv = twe_aen_get(sc, NULL);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev,
		    "unable to retrieve AEN (%d)\n", rv);
}

static void
twe_aen_enqueue(struct twe_softc *sc, uint16_t aen, int quiet)
{
	const char *str, *msg;
	int s, next, nextnext, level;

	/*
	 * First report the AEN on the console.  Maybe.
	 */
	if (!quiet) {
		str = twe_describe_code(twe_table_aen, TWE_AEN_CODE(aen));
		if (str == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unknown AEN 0x%04x\n", aen);
		} else {
			msg = str + 3;
			switch (str[1]) {
			case 'E':	level = LOG_EMERG;	break;
			case 'a':	level = LOG_ALERT;	break;
			case 'c':	level = LOG_CRIT;	break;
			case 'e':	level = LOG_ERR;	break;
			case 'w':	level = LOG_WARNING;	break;
			case 'n':	level = LOG_NOTICE;	break;
			case 'i':	level = LOG_INFO;	break;
			case 'd':	level = LOG_DEBUG;	break;
			default:
				/* Don't use syslog. */
				level = -1;
			}

			if (level < 0) {
				switch (str[0]) {
				case 'u':
				case 'p':
					printf("%s: %s %d: %s\n",
					    device_xname(sc->sc_dev),
					    str[0] == 'u' ? "unit" : "port",
					    TWE_AEN_UNIT(aen), msg);
					break;

				default:
					printf("%s: %s\n",
					    device_xname(sc->sc_dev), msg);
				}
			} else {
				switch (str[0]) {
				case 'u':
				case 'p':
					log(level, "%s: %s %d: %s\n",
					    device_xname(sc->sc_dev),
					    str[0] == 'u' ? "unit" : "port",
					    TWE_AEN_UNIT(aen), msg);
					break;

				default:
					log(level, "%s: %s\n",
					    device_xname(sc->sc_dev), msg);
				}
			}
		}
	}

	/* Now enqueue the AEN for management tools. */
	s = splbio();

	next = (sc->sc_aen_head + 1) % TWE_AEN_Q_LENGTH;
	nextnext = (sc->sc_aen_head + 2) % TWE_AEN_Q_LENGTH;

	/*
	 * If this is the last free slot, then queue up a "queue
	 * full" message.
	 */
	if (nextnext == sc->sc_aen_tail)
		aen = TWE_AEN_QUEUE_FULL;

	if (next != sc->sc_aen_tail) {
		sc->sc_aen_queue[sc->sc_aen_head] = aen;
		sc->sc_aen_head = next;
	}

	if (sc->sc_flags & TWEF_AENQ_WAIT) {
		sc->sc_flags &= ~TWEF_AENQ_WAIT;
		wakeup(&sc->sc_aen_queue);
	}

	splx(s);
}

/* NOTE: Must be called at splbio(). */
static uint16_t
twe_aen_dequeue(struct twe_softc *sc)
{
	uint16_t aen;

	if (sc->sc_aen_tail == sc->sc_aen_head)
		aen = TWE_AEN_QUEUE_EMPTY;
	else {
		aen = sc->sc_aen_queue[sc->sc_aen_tail];
		sc->sc_aen_tail = (sc->sc_aen_tail + 1) % TWE_AEN_Q_LENGTH;
	}

	return (aen);
}

/*
 * These are short-hand functions that execute TWE_OP_GET_PARAM to
 * fetch 1, 2, and 4 byte parameter values, respectively.
 */
int
twe_param_get_1(struct twe_softc *sc, int table_id, int param_id,
    uint8_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 1, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = *(uint8_t *)tp->tp_data;
	free(tp, M_DEVBUF);
	return (0);
}

int
twe_param_get_2(struct twe_softc *sc, int table_id, int param_id,
    uint16_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 2, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = le16toh(*(uint16_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	return (0);
}

int
twe_param_get_4(struct twe_softc *sc, int table_id, int param_id,
    uint32_t *valp)
{
	struct twe_param *tp;
	int rv;

	rv = twe_param_get(sc, table_id, param_id, 4, NULL, &tp);
	if (rv != 0)
		return (rv);
	*valp = le32toh(*(uint32_t *)tp->tp_data);
	free(tp, M_DEVBUF);
	return (0);
}

/*
 * Execute a TWE_OP_GET_PARAM command.
 * If a callback function is provided, it will be called with generated
 * context when the command has completed.  If no callback is provided,
 * the command is executed synchronously and a pointer to a buffer
 * containing the returned data is handed back through `pbuf'.
 *
 * The caller or callback is responsible for freeing the buffer.
 *
 * NOTE: We assume we can sleep here to wait for a CCB to become available.
 */
int
twe_param_get(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void (*func)(struct twe_ccb *, int), struct twe_param **pbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = func;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = sc->sc_dev;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_GET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and either wait or let the callback handle it. */
	if (func == NULL) {
		s = splbio();
		rv = twe_ccb_poll(sc, ccb, 5);
		twe_ccb_unmap(sc, ccb);
		twe_ccb_free(sc, ccb);
		splx(s);
	} else {
#ifdef DEBUG
		if (pbuf != NULL)
			panic("both func and pbuf defined");
#endif
		twe_ccb_enqueue(sc, ccb);
		return 0;
	}

 done:
	if (pbuf == NULL || rv != 0)
		free(tp, M_DEVBUF);
	else if (pbuf != NULL && rv == 0)
		*pbuf = tp;
	return rv;
}

/*
 * Execute a TWE_OP_SET_PARAM command.
 *
 * NOTE: We assume we can sleep here to wait for a CCB to become available.
 */
static int
twe_param_set(struct twe_softc *sc, int table_id, int param_id, size_t size,
    void *sbuf)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	struct twe_param *tp;
	int rv, s;

	tp = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_NOWAIT);
	if (tp == NULL)
		return ENOMEM;

	ccb = twe_ccb_alloc_wait(sc, TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
	KASSERT(ccb != NULL);

	ccb->ccb_data = tp;
	ccb->ccb_datasize = TWE_SECTOR_SIZE;
	ccb->ccb_tx.tx_handler = 0;
	ccb->ccb_tx.tx_context = tp;
	ccb->ccb_tx.tx_dv = sc->sc_dev;

	tc = ccb->ccb_cmd;
	tc->tc_size = 2;
	tc->tc_opcode = TWE_OP_SET_PARAM | (tc->tc_size << 5);
	tc->tc_unit = 0;
	tc->tc_count = htole16(1);

	/* Fill in the outbound parameter data. */
	tp->tp_table_id = htole16(table_id);
	tp->tp_param_id = param_id;
	tp->tp_param_size = size;
	memcpy(tp->tp_data, sbuf, size);

	/* Map the transfer. */
	if ((rv = twe_ccb_map(sc, ccb)) != 0) {
		twe_ccb_free(sc, ccb);
		goto done;
	}

	/* Submit the command and wait. */
	s = splbio();
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_unmap(sc, ccb);
	twe_ccb_free(sc, ccb);
	splx(s);
 done:
	free(tp, M_DEVBUF);
	return (rv);
}

/*
 * Execute a TWE_OP_INIT_CONNECTION command.  Return non-zero on error.
 * Must be called with interrupts blocked.
 */
static int
twe_init_connection(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	struct twe_cmd *tc;
	int rv;

	if ((ccb = twe_ccb_alloc(sc, 0)) == NULL)
		return (EAGAIN);

	/* Build the command. */
	tc = ccb->ccb_cmd;
	tc->tc_size = 3;
	tc->tc_opcode = TWE_OP_INIT_CONNECTION;
	tc->tc_unit = 0;
	tc->tc_count = htole16(TWE_MAX_CMDS);
	tc->tc_args.init_connection.response_queue_pointer = 0;

	/* Submit the command for immediate execution. */
	rv = twe_ccb_poll(sc, ccb, 5);
	twe_ccb_free(sc, ccb);
	return (rv);
}

/*
 * Poll the controller for completed commands.  Must be called with
 * interrupts blocked.
 */
static void
twe_poll(struct twe_softc *sc)
{
	struct twe_ccb *ccb;
	int found;
	u_int status, cmdid;

	found = 0;

	for (;;) {
		status = twe_inl(sc, TWE_REG_STS);
		twe_status_check(sc, status);

		if ((status & TWE_STS_RESP_QUEUE_EMPTY))
			break;

		found = 1;
		cmdid = twe_inl(sc, TWE_REG_RESP_QUEUE);
		cmdid = (cmdid & TWE_RESP_MASK) >> TWE_RESP_SHIFT;
		if (cmdid >= TWE_MAX_QUEUECNT) {
			aprint_error_dev(sc->sc_dev, "bad cmdid %d\n", cmdid);
			continue;
		}

		ccb = sc->sc_ccbs + cmdid;
		if ((ccb->ccb_flags & TWE_CCB_ACTIVE) == 0) {
			printf("%s: CCB for cmdid %d not active\n",
			    device_xname(sc->sc_dev), cmdid);
			continue;
		}
		ccb->ccb_flags ^= TWE_CCB_COMPLETE | TWE_CCB_ACTIVE;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* Pass notification to upper layers. */
		if (ccb->ccb_tx.tx_handler != NULL)
			(*ccb->ccb_tx.tx_handler)(ccb,
			    ccb->ccb_cmd->tc_status != 0 ? EIO : 0);
	}

	/* If any commands have completed, run the software queue. */
	if (found)
		twe_ccb_enqueue(sc, NULL);
}

/*
 * Wait for `status' to be set in the controller status register.  Return
 * zero if found, non-zero if the operation timed out.
 */
static int
twe_status_wait(struct twe_softc *sc, u_int32_t status, int timo)
{

	for (timo *= 10; timo != 0; timo--) {
		if ((twe_inl(sc, TWE_REG_STS) & status) == status)
			break;
		delay(100000);
	}

	return (timo == 0);
}

/*
 * Clear a PCI parity error.
 */
static void
twe_clear_pci_parity_error(struct twe_softc *sc)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0,
	    TWE_CTL_CLEAR_PARITY_ERROR);

	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PARITY_ERROR, 2);
}

/*
 * Clear a PCI abort.
 */
static void
twe_clear_pci_abort(struct twe_softc *sc)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0x0, TWE_CTL_CLEAR_PCI_ABORT);

	//FreeBSD: pci_write_config(sc->twe_dev, PCIR_STATUS, TWE_PCI_CLEAR_PCI_ABORT, 2);
}

/*
 * Complain if the status bits aren't what we expect.
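 *
 * Both missing "expected" bits and asserted "unexpected" bits are reported.
 * A PCI parity error or PCI abort flagged in the status register is also
 * cleared via twe_clear_pci_parity_error() or twe_clear_pci_abort() above
 * so that it does not persist across subsequent status checks.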
 */
static int
twe_status_check(struct twe_softc *sc, u_int status)
{
	int rv;

	rv = 0;

	if ((status & TWE_STS_EXPECTED_BITS) != TWE_STS_EXPECTED_BITS) {
		aprint_error_dev(sc->sc_dev, "missing status bits: 0x%08x\n",
		    status & ~TWE_STS_EXPECTED_BITS);
		rv = -1;
	}

	if ((status & TWE_STS_UNEXPECTED_BITS) != 0) {
		aprint_error_dev(sc->sc_dev, "unexpected status bits: 0x%08x\n",
		    status & TWE_STS_UNEXPECTED_BITS);
		rv = -1;
		if (status & TWE_STS_PCI_PARITY_ERROR) {
			aprint_error_dev(sc->sc_dev, "PCI parity error: Reseat"
			    " card, move card or buggy device present.\n");
			twe_clear_pci_parity_error(sc);
		}
		if (status & TWE_STS_PCI_ABORT) {
			aprint_error_dev(sc->sc_dev, "PCI abort, clearing.\n");
			twe_clear_pci_abort(sc);
		}
	}

	return (rv);
}

/*
 * Allocate and initialise a CCB.
 */
static inline void
twe_ccb_init(struct twe_softc *sc, struct twe_ccb *ccb, int flags)
{
	struct twe_cmd *tc;

	ccb->ccb_tx.tx_handler = NULL;
	ccb->ccb_flags = flags;
	tc = ccb->ccb_cmd;
	tc->tc_status = 0;
	tc->tc_flags = 0;
	tc->tc_cmdid = ccb->ccb_cmdid;
}

struct twe_ccb *
twe_ccb_alloc(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	s = splbio();
	if (__predict_false((flags & TWE_CCB_AEN) != 0)) {
		/* Use the reserved CCB. */
		ccb = sc->sc_ccbs;
	} else {
		/* Allocate a CCB and command block. */
		if (__predict_false((ccb =
		    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
			splx(s);
			return (NULL);
		}
		SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
	}
#ifdef DIAGNOSTIC
	if ((long)(ccb - sc->sc_ccbs) == 0 && (flags & TWE_CCB_AEN) == 0)
		panic("twe_ccb_alloc: got reserved CCB for non-AEN");
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}

struct twe_ccb *
twe_ccb_alloc_wait(struct twe_softc *sc, int flags)
{
	struct twe_ccb *ccb;
	int s;

	KASSERT((flags & TWE_CCB_AEN) == 0);

	s = splbio();
	while (__predict_false((ccb =
	    SLIST_FIRST(&sc->sc_ccb_freelist)) == NULL)) {
		sc->sc_flags |= TWEF_WAIT_CCB;
		(void) tsleep(&sc->sc_ccb_freelist, PRIBIO, "tweccb", 0);
	}
	SLIST_REMOVE_HEAD(&sc->sc_ccb_freelist, ccb_chain.slist);
#ifdef DIAGNOSTIC
	if ((ccb->ccb_flags & TWE_CCB_ALLOCED) != 0)
		panic("twe_ccb_alloc_wait: CCB %ld already allocated",
		    (long)(ccb - sc->sc_ccbs));
	flags |= TWE_CCB_ALLOCED;
#endif
	splx(s);

	twe_ccb_init(sc, ccb, flags);
	return (ccb);
}

/*
 * Free a CCB.
 */
void
twe_ccb_free(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();
	if ((ccb->ccb_flags & TWE_CCB_AEN) == 0) {
		SLIST_INSERT_HEAD(&sc->sc_ccb_freelist, ccb, ccb_chain.slist);
		if (__predict_false((sc->sc_flags & TWEF_WAIT_CCB) != 0)) {
			sc->sc_flags &= ~TWEF_WAIT_CCB;
			wakeup(&sc->sc_ccb_freelist);
		}
	}
	ccb->ccb_flags = 0;
	splx(s);
}

/*
 * Map the specified CCB's command block and data buffer (if any) into
 * controller visible space.  Perform DMA synchronisation.
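 *
 * If the data buffer is not TWE_ALIGNMENT (512-byte) aligned, a temporary
 * aligned bounce buffer is allocated here and the caller's data is copied
 * through it; twe_ccb_unmap() copies the result back for DATA_IN transfers
 * and releases the bounce buffer once the transfer has completed.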
 */
int
twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
{
	struct twe_cmd *tc;
	int flags, nsegs, i, s, rv;
	void *data;

	/*
	 * The data as a whole must be 512-byte aligned.
	 */
	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
		s = splvm();
		/* XXX */
		rv = uvm_km_kmem_alloc(kmem_va_arena,
		    ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT),
		    (vmem_addr_t *)&ccb->ccb_abuf);
		splx(s);
		data = (void *)ccb->ccb_abuf;
		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
			memcpy(data, ccb->ccb_data, ccb->ccb_datasize);
	} else {
		ccb->ccb_abuf = (vaddr_t)0;
		data = ccb->ccb_data;
	}

	/*
	 * Map the data buffer into bus space and build the S/G list.
	 */
	rv = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap_xfer, data,
	    ccb->ccb_datasize, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((ccb->ccb_flags & TWE_CCB_DATA_IN) ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0) {
		if (ccb->ccb_abuf != (vaddr_t)0) {
			s = splvm();
			/* XXX */
			uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
			    ccb->ccb_datasize);
			splx(s);
		}
		return (rv);
	}

	nsegs = ccb->ccb_dmamap_xfer->dm_nsegs;
	tc = ccb->ccb_cmd;
	tc->tc_size += 2 * nsegs;

	/* The location of the S/G list is dependent upon command type. */
	switch (tc->tc_opcode >> 5) {
	case 2:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.param.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.param.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.param.sgl[i].tsg_address = 0;
			tc->tc_args.param.sgl[i].tsg_length = 0;
		}
		break;
	case 3:
		for (i = 0; i < nsegs; i++) {
			tc->tc_args.io.sgl[i].tsg_address =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_addr);
			tc->tc_args.io.sgl[i].tsg_length =
			    htole32(ccb->ccb_dmamap_xfer->dm_segs[i].ds_len);
		}
		/* XXX Needed? */
		for (; i < TWE_SG_SIZE; i++) {
			tc->tc_args.io.sgl[i].tsg_address = 0;
			tc->tc_args.io.sgl[i].tsg_length = 0;
		}
		break;
	default:
		/*
		 * In all likelihood, this is a command passed from
		 * management tools in userspace where no S/G list is
		 * necessary because no data is being passed.
		 */
		break;
	}

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_PREREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_PREWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	return (0);
}

/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
		    ccb->ccb_datasize);
		splx(s);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout (but don't check status, as some command types don't
 * return status).  Must be called with interrupts blocked.
 */
int
twe_ccb_poll(struct twe_softc *sc, struct twe_ccb *ccb, int timo)
{
	int rv;

	if ((rv = twe_ccb_submit(sc, ccb)) != 0)
		return (rv);

	for (timo *= 1000; timo != 0; timo--) {
		twe_poll(sc);
		if ((ccb->ccb_flags & TWE_CCB_COMPLETE) != 0)
			break;
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
twe_ccb_enqueue(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int s;

	s = splbio();

	if (ccb != NULL)
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_queue, ccb, ccb_chain.simpleq);

	while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_queue)) != NULL) {
		if (twe_ccb_submit(sc, ccb))
			break;
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_queue, ccb_chain.simpleq);
	}

	splx(s);
}

/*
 * Submit the command block associated with the specified CCB to the
 * controller for execution.  Must be called with interrupts blocked.
 */
int
twe_ccb_submit(struct twe_softc *sc, struct twe_ccb *ccb)
{
	bus_addr_t pa;
	int rv;
	u_int status;

	/* Check to see if we can post a command. */
	status = twe_inl(sc, TWE_REG_STS);
	twe_status_check(sc, status);

	if ((status & TWE_STS_CMD_QUEUE_FULL) == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    (char *)ccb->ccb_cmd - (char *)sc->sc_cmds,
		    sizeof(struct twe_cmd),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
#ifdef DIAGNOSTIC
		if ((ccb->ccb_flags & TWE_CCB_ALLOCED) == 0)
			panic("%s: CCB %ld not ALLOCED\n",
			    device_xname(sc->sc_dev), (long)(ccb - sc->sc_ccbs));
#endif
		ccb->ccb_flags |= TWE_CCB_ACTIVE;
		pa = sc->sc_cmds_paddr +
		    ccb->ccb_cmdid * sizeof(struct twe_cmd);
		twe_outl(sc, TWE_REG_CMD_QUEUE, (u_int32_t)pa);
		rv = 0;
	} else
		rv = EBUSY;

	return (rv);
}


/*
 * Accept an open operation on the control device.
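 *
 * The control device is exclusive-open; it is used by userland management
 * tools such as the 3ware cli to issue the ioctls handled by tweioctl()
 * below.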
 */
static int
tweopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct twe_softc *twe;

	if ((twe = device_lookup_private(&twe_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((twe->sc_flags & TWEF_OPEN) != 0)
		return (EBUSY);

	twe->sc_flags |= TWEF_OPEN;
	return (0);
}

/*
 * Accept the last close on the control device.
 */
static int
tweclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct twe_softc *twe;

	twe = device_lookup_private(&twe_cd, minor(dev));
	twe->sc_flags &= ~TWEF_OPEN;
	return (0);
}

void
twe_ccb_wait_handler(struct twe_ccb *ccb, int error)
{

	/* Just wake up the sleeper. */
	wakeup(ccb);
}

/*
 * Handle control operations.
 */
static int
tweioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct twe_softc *twe;
	struct twe_ccb *ccb;
	struct twe_param *param;
	struct twe_usercommand *tu;
	struct twe_paramcommand *tp;
	struct twe_drivecommand *td;
	void *pdata = NULL;
	int s, error = 0;
	u_int8_t cmdid;

	twe = device_lookup_private(&twe_cd, minor(dev));
	tu = (struct twe_usercommand *)data;
	tp = (struct twe_paramcommand *)data;
	td = (struct twe_drivecommand *)data;

	/* This is intended to be compatible with the FreeBSD interface. */
	switch (cmd) {
	case TWEIO_COMMAND:
		error = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (error)
			return (error);

		/* XXX mutex */
		if (tu->tu_size > 0) {
			/*
			 * XXX Handle > TWE_SECTOR_SIZE?  Let's see if
			 * it's really necessary, first.
			 */
			if (tu->tu_size > TWE_SECTOR_SIZE) {
#ifdef TWE_DEBUG
				printf("%s: TWEIO_COMMAND: tu_size = %zu\n",
				    device_xname(twe->sc_dev), tu->tu_size);
#endif
				return EINVAL;
			}
			pdata = malloc(TWE_SECTOR_SIZE, M_DEVBUF, M_WAITOK);
			error = copyin(tu->tu_data, pdata, tu->tu_size);
			if (error != 0)
				goto done;
			ccb = twe_ccb_alloc_wait(twe,
			    TWE_CCB_DATA_IN | TWE_CCB_DATA_OUT);
			KASSERT(ccb != NULL);
			ccb->ccb_data = pdata;
			ccb->ccb_datasize = TWE_SECTOR_SIZE;
		} else {
			ccb = twe_ccb_alloc_wait(twe, 0);
			KASSERT(ccb != NULL);
		}

		ccb->ccb_tx.tx_handler = twe_ccb_wait_handler;
		ccb->ccb_tx.tx_context = NULL;
		ccb->ccb_tx.tx_dv = twe->sc_dev;

		cmdid = ccb->ccb_cmdid;
		memcpy(ccb->ccb_cmd, &tu->tu_cmd, sizeof(struct twe_cmd));
		ccb->ccb_cmd->tc_cmdid = cmdid;

		/* Map the transfer. */
		if ((error = twe_ccb_map(twe, ccb)) != 0) {
			twe_ccb_free(twe, ccb);
			goto done;
		}

		/* Submit the command and wait up to 1 minute. */
		error = 0;
		twe_ccb_enqueue(twe, ccb);
		s = splbio();
		while ((ccb->ccb_flags & TWE_CCB_COMPLETE) == 0)
			if ((error = tsleep(ccb, PRIBIO, "tweioctl",
			    60 * hz)) != 0)
				break;
		splx(s);

		/* Copy the command back to the ioctl argument. */
		memcpy(&tu->tu_cmd, ccb->ccb_cmd, sizeof(struct twe_cmd));
#ifdef TWE_DEBUG
		printf("%s: TWEIO_COMMAND: tc_opcode = 0x%02x, "
		    "tc_status = 0x%02x\n", device_xname(twe->sc_dev),
		    tu->tu_cmd.tc_opcode, tu->tu_cmd.tc_status);
#endif

		s = splbio();
		twe_ccb_free(twe, ccb);
		splx(s);

		if (tu->tu_size > 0)
			error = copyout(pdata, tu->tu_data, tu->tu_size);
		goto done;

	case TWEIO_STATS:
		return (ENOENT);

	case TWEIO_AEN_POLL:
		s = splbio();
		*(u_int *)data = twe_aen_dequeue(twe);
		splx(s);
		return (0);

	case TWEIO_AEN_WAIT:
		s = splbio();
		while ((*(u_int *)data =
		    twe_aen_dequeue(twe)) == TWE_AEN_QUEUE_EMPTY) {
			twe->sc_flags |= TWEF_AENQ_WAIT;
			error = tsleep(&twe->sc_aen_queue, PRIBIO | PCATCH,
			    "tweaen", 0);
			if (error == EINTR) {
				splx(s);
				return (error);
			}
		}
		splx(s);
		return (0);

	case TWEIO_GET_PARAM:
		error = twe_param_get(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, 0, &param);
		if (error != 0)
			return (error);
		if (param->tp_param_size > tp->tp_size) {
			error = EFAULT;
			goto done;
		}
		error = copyout(param->tp_data, tp->tp_data,
		    param->tp_param_size);
		free(param, M_DEVBUF);
		goto done;

	case TWEIO_SET_PARAM:
		pdata = malloc(tp->tp_size, M_DEVBUF, M_WAITOK);
		if ((error = copyin(tp->tp_data, pdata, tp->tp_size)) != 0)
			goto done;
		error = twe_param_set(twe, tp->tp_table_id, tp->tp_param_id,
		    tp->tp_size, pdata);
		goto done;

	case TWEIO_RESET:
		s = splbio();
		twe_reset(twe);
		splx(s);
		return (0);

	case TWEIO_ADD_UNIT:
		/* XXX mutex */
		return (twe_add_unit(twe, td->td_unit));

	case TWEIO_DEL_UNIT:
		/* XXX mutex */
		return (twe_del_unit(twe, td->td_unit));

	default:
		return EINVAL;
	}
 done:
	if (pdata)
		free(pdata, M_DEVBUF);
	return error;
}

const struct cdevsw twe_cdevsw = {
	.d_open = tweopen,
	.d_close = tweclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = tweioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

/*
 * Print some information about the controller
 */
static void
twe_describe_controller(struct twe_softc *sc)
{
	struct twe_param *p[6];
	int i, rv = 0;
	uint32_t dsize;
	uint8_t ports;

	ports = 0;

	/* get the port count */
	rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
	    TWE_PARAM_CONTROLLER_PortCount, &ports);

	/* get version strings */
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
	    16, NULL, &p[0]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
	    16, NULL, &p[1]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
	    16, NULL, &p[2]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
	    8, NULL, &p[3]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
	    8, NULL, &p[4]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
	    8, NULL, &p[5]);

	if (rv) {
		/* some error occurred */
		aprint_error_dev(sc->sc_dev,
		    "failed to fetch version information\n");
		return;
	}

	aprint_normal_dev(sc->sc_dev, "%d ports, Firmware %.16s, BIOS %.16s\n",
	    ports, p[1]->tp_data, p[2]->tp_data);

	aprint_verbose_dev(sc->sc_dev,
	    "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
	    p[0]->tp_data, p[3]->tp_data,
	    p[4]->tp_data, p[5]->tp_data);

	free(p[0], M_DEVBUF);
	free(p[1], M_DEVBUF);
	free(p[2], M_DEVBUF);
	free(p[3], M_DEVBUF);
	free(p[4], M_DEVBUF);
	free(p[5], M_DEVBUF);

	rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
	    TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
	if (rv) {
		aprint_error_dev(sc->sc_dev,
		    "failed to get drive status summary\n");
		return;
	}
	for (i = 0; i < ports; i++) {
		if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
			continue;
		rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Size, &dsize);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "unable to get drive size for port %d\n", i);
			continue;
		}
		rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "unable to get drive model for port %d\n", i);
			continue;
		}
		aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
		    i, p[1]->tp_data, dsize / 2048);
		free(p[1], M_DEVBUF);
	}
	free(p[0], M_DEVBUF);
}

MODULE(MODULE_CLASS_DRIVER, twe, "pci");

#ifdef _MODULE
#include "ioconf.c"
#endif

static int
twe_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(cfdriver_ioconf_twe,
		    cfattach_ioconf_twe, cfdata_ioconf_twe);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(cfdriver_ioconf_twe,
		    cfattach_ioconf_twe, cfdata_ioconf_twe);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}