1 /* $NetBSD: amr.c,v 1.8 2003/01/31 00:07:40 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Andrew Doran. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /*- 40 * Copyright (c) 1999,2000 Michael Smith 41 * Copyright (c) 2000 BSDi 42 * All rights reserved. 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice, this list of conditions and the following disclaimer. 49 * 2. Redistributions in binary form must reproduce the above copyright 50 * notice, this list of conditions and the following disclaimer in the 51 * documentation and/or other materials provided with the distribution. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 
 *
 * from FreeBSD: amr_pci.c,v 1.5 2000/08/30 07:52:40 msmith Exp
 * from FreeBSD: amr.c,v 1.16 2000/08/30 07:52:40 msmith Exp
 */

/*
 * Driver for AMI RAID controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amr.c,v 1.8 2003/01/31 00:07:40 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>
#include <machine/bus.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/amrreg.h>
#include <dev/pci/amrvar.h>

/*
 * Each command slot is given a fixed 32-entry scatter/gather list (see
 * AMR_SGL_SIZE below), so AMR_MAX_SEGS must fit within that.
 */
#if AMR_MAX_SEGS > 32
#error AMR_MAX_SEGS too high
#endif

/* Size of the bounce buffer used for enquiry-style commands. */
#define	AMR_ENQUIRY_BUFSIZE	2048
/* Bytes of S/G list space reserved per command slot (32 entries). */
#define	AMR_SGL_SIZE		(sizeof(struct amr_sgentry) * 32)

void	amr_attach(struct device *, struct device *, void *);
void	*amr_enquire(struct amr_softc *, u_int8_t, u_int8_t, u_int8_t);
int	amr_init(struct amr_softc *, const char *,
		 struct pci_attach_args *pa);
int	amr_intr(void *);
int	amr_match(struct device *, struct cfdata *, void *);
int	amr_print(void *, const char *);
void	amr_shutdown(void *);
int	amr_submatch(struct device *, struct cfdata *, void *);

int	amr_mbox_wait(struct amr_softc *);
int	amr_quartz_get_work(struct amr_softc *, struct amr_mailbox *);
int	amr_quartz_submit(struct amr_softc *, struct amr_ccb *);
int	amr_std_get_work(struct amr_softc *, struct amr_mailbox *);
int	amr_std_submit(struct amr_softc *, struct amr_ccb *);

static inline u_int8_t	amr_inb(struct amr_softc *, int);
static inline u_int32_t	amr_inl(struct amr_softc *, int);
static inline void	amr_outb(struct amr_softc *, int, u_int8_t);
static inline void	amr_outl(struct amr_softc *, int, u_int32_t);

CFATTACH_DECL(amr, sizeof(struct amr_softc),
    amr_match, amr_attach, NULL, NULL);

#define	AT_QUARTZ	0x01	/* `Quartz' chipset */
#define	AT_SIG		0x02	/* Check for signature */

/*
 * Table of PCI vendor/product IDs handled by this driver, with
 * per-device flags (Quartz vs. standard register interface).
 */
struct amr_pci_type {
	u_short	apt_vendor;
	u_short	apt_product;
	u_short	apt_flags;
} static const amr_pci_type[] = {
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID,  0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID2, 0 },
	{ PCI_VENDOR_AMI,   PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_AMI_MEGARAID3, AT_QUARTZ | AT_SIG }
};

/*
 * Map 8LD extended-enquiry signatures to human-readable product names;
 * consulted by amr_init() when 40LD product info is unavailable.
 */
struct amr_typestr {
	const char	*at_str;
	int		at_sig;
} static const amr_typestr[] = {
	{ "Series 431",			AMR_SIG_431 },
	{ "Series 438",			AMR_SIG_438 },
	{ "Series 466",			AMR_SIG_466 },
	{ "Series 467",			AMR_SIG_467 },
	{ "Series 490",			AMR_SIG_490 },
	{ "Series 762",			AMR_SIG_762 },
	{ "HP NetRAID (T5)",		AMR_SIG_T5 },
	{ "HP NetRAID (T7)",		AMR_SIG_T7 },
};

/* Shutdown hook; established once and shared by all attached controllers. */
static void	*amr_sdh;

/*
 * Register accessors.  Reads are preceded by a full barrier so that the
 * value fetched reflects all prior accesses; writes are followed by a
 * write barrier so they are posted before subsequent accesses.
 */
static inline u_int8_t
amr_inb(struct amr_softc *amr, int off)
{

	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_1(amr->amr_iot, amr->amr_ioh, off));
}

static inline u_int32_t
amr_inl(struct amr_softc *amr, int off)
{

	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(amr->amr_iot, amr->amr_ioh, off));
}

static inline void
amr_outb(struct amr_softc *amr, int off, u_int8_t val)
{

	bus_space_write_1(amr->amr_iot, amr->amr_ioh, off, val);
	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 1,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline void
amr_outl(struct amr_softc *amr, int off, u_int32_t val)
{

	bus_space_write_4(amr->amr_iot, amr->amr_ioh, off, val);
	bus_space_barrier(amr->amr_iot, amr->amr_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Match a supported device.
 */
int
amr_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct pci_attach_args *pa;
	pcireg_t s;
	int i;

	pa = (struct pci_attach_args *)aux;

	/*
	 * Don't match the device if it's operating in I2O mode.  In this
	 * case it should be handled by the `iop' driver.
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (0);

	/* Look the vendor/product pair up in our ID table. */
	for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++)
		if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor &&
		    PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product)
			break;

	if (i == sizeof(amr_pci_type) / sizeof(amr_pci_type[0]))
		return (0);

	if ((amr_pci_type[i].apt_flags & AT_SIG) == 0)
		return (1);

	/* Shared PCI ID: confirm via the Quartz signature register. */
	s = pci_conf_read(pa->pa_pc, pa->pa_tag, AMR_QUARTZ_SIG_REG) & 0xffff;
	return (s == AMR_QUARTZ_SIG0 || s == AMR_QUARTZ_SIG1);
}

/*
 * Attach a supported device.  XXX This doesn't fail gracefully, and may
 * over-allocate resources.
229 */ 230 void 231 amr_attach(struct device *parent, struct device *self, void *aux) 232 { 233 bus_space_tag_t memt, iot; 234 bus_space_handle_t memh, ioh; 235 struct pci_attach_args *pa; 236 struct amr_attach_args amra; 237 const struct amr_pci_type *apt; 238 struct amr_softc *amr; 239 pci_chipset_tag_t pc; 240 pci_intr_handle_t ih; 241 const char *intrstr; 242 pcireg_t reg; 243 int rseg, i, size, rv, memreg, ioreg; 244 bus_dma_segment_t seg; 245 struct amr_ccb *ac; 246 247 aprint_naive(": RAID controller\n"); 248 249 amr = (struct amr_softc *)self; 250 pa = (struct pci_attach_args *)aux; 251 pc = pa->pa_pc; 252 253 for (i = 0; i < sizeof(amr_pci_type) / sizeof(amr_pci_type[0]); i++) 254 if (PCI_VENDOR(pa->pa_id) == amr_pci_type[i].apt_vendor && 255 PCI_PRODUCT(pa->pa_id) == amr_pci_type[i].apt_product) 256 break; 257 apt = amr_pci_type + i; 258 259 memreg = ioreg = 0; 260 for (i = 0x10; i <= 0x14; i += 4) { 261 reg = pci_conf_read(pc, pa->pa_tag, i); 262 switch (PCI_MAPREG_TYPE(reg)) { 263 case PCI_MAPREG_TYPE_MEM: 264 if (PCI_MAPREG_MEM_SIZE(reg) != 0) 265 memreg = i; 266 break; 267 case PCI_MAPREG_TYPE_IO: 268 if (PCI_MAPREG_IO_SIZE(reg) != 0) 269 ioreg = i; 270 break; 271 } 272 } 273 274 if (memreg != 0) 275 if (pci_mapreg_map(pa, memreg, PCI_MAPREG_TYPE_MEM, 0, 276 &memt, &memh, NULL, NULL)) 277 memreg = 0; 278 if (ioreg != 0) 279 if (pci_mapreg_map(pa, ioreg, PCI_MAPREG_TYPE_IO, 0, 280 &iot, &ioh, NULL, NULL)) 281 ioreg = 0; 282 283 if (memreg) { 284 amr->amr_iot = memt; 285 amr->amr_ioh = memh; 286 } else if (ioreg) { 287 amr->amr_iot = iot; 288 amr->amr_ioh = ioh; 289 } else { 290 aprint_error("can't map control registers\n"); 291 return; 292 } 293 294 amr->amr_dmat = pa->pa_dmat; 295 296 /* Enable the device. */ 297 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 298 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 299 reg | PCI_COMMAND_MASTER_ENABLE); 300 301 /* Map and establish the interrupt. 
*/ 302 if (pci_intr_map(pa, &ih)) { 303 aprint_error("can't map interrupt\n"); 304 return; 305 } 306 intrstr = pci_intr_string(pc, ih); 307 amr->amr_ih = pci_intr_establish(pc, ih, IPL_BIO, amr_intr, amr); 308 if (amr->amr_ih == NULL) { 309 aprint_error("can't establish interrupt"); 310 if (intrstr != NULL) 311 aprint_normal(" at %s", intrstr); 312 aprint_normal("\n"); 313 return; 314 } 315 316 /* 317 * Allocate space for the mailbox and S/G lists. Some controllers 318 * don't like S/G lists to be located below 0x2000, so we allocate 319 * enough slop to enable us to compensate. 320 * 321 * The standard mailbox structure needs to be aligned on a 16-byte 322 * boundary. The 64-bit mailbox has one extra field, 4 bytes in 323 * size, which preceeds the standard mailbox. 324 */ 325 size = AMR_SGL_SIZE * AMR_MAX_CMDS + 0x2000; 326 327 if ((rv = bus_dmamem_alloc(amr->amr_dmat, size, PAGE_SIZE, NULL, &seg, 328 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 329 aprint_error("%s: unable to allocate buffer, rv = %d\n", 330 amr->amr_dv.dv_xname, rv); 331 return; 332 } 333 334 if ((rv = bus_dmamem_map(amr->amr_dmat, &seg, rseg, size, 335 (caddr_t *)&amr->amr_mbox, 336 BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { 337 aprint_error("%s: unable to map buffer, rv = %d\n", 338 amr->amr_dv.dv_xname, rv); 339 return; 340 } 341 342 if ((rv = bus_dmamap_create(amr->amr_dmat, size, 1, size, 0, 343 BUS_DMA_NOWAIT, &amr->amr_dmamap)) != 0) { 344 aprint_error("%s: unable to create buffer DMA map, rv = %d\n", 345 amr->amr_dv.dv_xname, rv); 346 return; 347 } 348 349 if ((rv = bus_dmamap_load(amr->amr_dmat, amr->amr_dmamap, 350 amr->amr_mbox, size, NULL, BUS_DMA_NOWAIT)) != 0) { 351 aprint_error("%s: unable to load buffer DMA map, rv = %d\n", 352 amr->amr_dv.dv_xname, rv); 353 return; 354 } 355 356 memset(amr->amr_mbox, 0, size); 357 358 amr->amr_mbox_paddr = amr->amr_dmamap->dm_segs[0].ds_addr + 16; 359 amr->amr_sgls_paddr = (amr->amr_mbox_paddr + 0x1fff) & ~0x1fff; 360 amr->amr_sgls = (struct amr_sgentry 
*)((caddr_t)amr->amr_mbox + 361 amr->amr_sgls_paddr - amr->amr_dmamap->dm_segs[0].ds_addr); 362 amr->amr_mbox = (struct amr_mailbox *)((caddr_t)amr->amr_mbox + 16); 363 364 /* 365 * Allocate and initalise the command control blocks. 366 */ 367 ac = malloc(sizeof(*ac) * AMR_MAX_CMDS, M_DEVBUF, M_NOWAIT | M_ZERO); 368 amr->amr_ccbs = ac; 369 SLIST_INIT(&amr->amr_ccb_freelist); 370 371 for (i = 0; i < AMR_MAX_CMDS; i++, ac++) { 372 rv = bus_dmamap_create(amr->amr_dmat, AMR_MAX_XFER, 373 AMR_MAX_SEGS, AMR_MAX_XFER, 0, 374 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 375 &ac->ac_xfer_map); 376 if (rv != 0) 377 break; 378 379 ac->ac_ident = i; 380 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist); 381 } 382 if (i != AMR_MAX_CMDS) 383 aprint_error("%s: %d/%d CCBs created\n", amr->amr_dv.dv_xname, 384 i, AMR_MAX_CMDS); 385 386 /* 387 * Take care of model-specific tasks. 388 */ 389 if ((apt->apt_flags & AT_QUARTZ) != 0) { 390 amr->amr_submit = amr_quartz_submit; 391 amr->amr_get_work = amr_quartz_get_work; 392 } else { 393 amr->amr_submit = amr_std_submit; 394 amr->amr_get_work = amr_std_get_work; 395 396 /* Notify the controller of the mailbox location. */ 397 amr_outl(amr, AMR_SREG_MBOX, amr->amr_mbox_paddr); 398 amr_outb(amr, AMR_SREG_MBOX_ENABLE, AMR_SMBOX_ENABLE_ADDR); 399 400 /* Clear outstanding interrupts and enable interrupts. */ 401 amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR); 402 amr_outb(amr, AMR_SREG_TOGL, 403 amr_inb(amr, AMR_SREG_TOGL) | AMR_STOGL_ENABLE); 404 } 405 406 /* 407 * Retrieve parameters, and tell the world about us. 408 */ 409 amr->amr_maxqueuecnt = i; 410 aprint_normal(": AMI RAID "); 411 if (amr_init(amr, intrstr, pa) != 0) 412 return; 413 414 /* 415 * Cap the maximum number of outstanding commands. AMI's Linux 416 * driver doesn't trust the controller's reported value, and lockups 417 * have been seen when we do. 
418 */ 419 amr->amr_maxqueuecnt = min(amr->amr_maxqueuecnt, AMR_MAX_CMDS); 420 if (amr->amr_maxqueuecnt > i) 421 amr->amr_maxqueuecnt = i; 422 423 /* Set our `shutdownhook' before we start any device activity. */ 424 if (amr_sdh == NULL) 425 amr_sdh = shutdownhook_establish(amr_shutdown, NULL); 426 427 /* Attach sub-devices. */ 428 for (i = 0; i < amr->amr_numdrives; i++) { 429 if (amr->amr_drive[i].al_size == 0) 430 continue; 431 amra.amra_unit = i; 432 config_found_sm(&amr->amr_dv, &amra, amr_print, amr_submatch); 433 } 434 435 SIMPLEQ_INIT(&amr->amr_ccb_queue); 436 } 437 438 /* 439 * Print autoconfiguration message for a sub-device. 440 */ 441 int 442 amr_print(void *aux, const char *pnp) 443 { 444 struct amr_attach_args *amra; 445 446 amra = (struct amr_attach_args *)aux; 447 448 if (pnp != NULL) 449 aprint_normal("block device at %s", pnp); 450 aprint_normal(" unit %d", amra->amra_unit); 451 return (UNCONF); 452 } 453 454 /* 455 * Match a sub-device. 456 */ 457 int 458 amr_submatch(struct device *parent, struct cfdata *cf, void *aux) 459 { 460 struct amr_attach_args *amra; 461 462 amra = (struct amr_attach_args *)aux; 463 464 if (cf->amracf_unit != AMRCF_UNIT_DEFAULT && 465 cf->amracf_unit != amra->amra_unit) 466 return (0); 467 468 return (config_match(parent, cf, aux)); 469 } 470 471 /* 472 * Retrieve operational parameters and describe the controller. 473 */ 474 int 475 amr_init(struct amr_softc *amr, const char *intrstr, 476 struct pci_attach_args *pa) 477 { 478 struct amr_prodinfo *ap; 479 struct amr_enquiry *ae; 480 struct amr_enquiry3 *aex; 481 const char *prodstr; 482 u_int i, sig; 483 char buf[64]; 484 485 /* 486 * Try to get 40LD product info, which tells us what the card is 487 * labelled as. 
488 */ 489 ap = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0); 490 if (ap != NULL) { 491 aprint_normal("<%.80s>\n", ap->ap_product); 492 if (intrstr != NULL) 493 aprint_normal("%s: interrupting at %s\n", 494 amr->amr_dv.dv_xname, intrstr); 495 aprint_normal("%s: firmware %.16s, BIOS %.16s, %dMB RAM\n", 496 amr->amr_dv.dv_xname, ap->ap_firmware, ap->ap_bios, 497 le16toh(ap->ap_memsize)); 498 499 amr->amr_maxqueuecnt = ap->ap_maxio; 500 free(ap, M_DEVBUF); 501 502 /* 503 * Fetch and record state of logical drives. 504 */ 505 aex = amr_enquire(amr, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3, 506 AMR_CONFIG_ENQ3_SOLICITED_FULL); 507 if (aex == NULL) { 508 aprint_error("%s ENQUIRY3 failed\n", 509 amr->amr_dv.dv_xname); 510 return (-1); 511 } 512 513 if (aex->ae_numldrives > AMR_MAX_UNITS) { 514 aprint_error( 515 "%s: adjust AMR_MAX_UNITS to %d (currently %d)" 516 "\n", amr->amr_dv.dv_xname, 517 ae->ae_ldrv.al_numdrives, AMR_MAX_UNITS); 518 amr->amr_numdrives = AMR_MAX_UNITS; 519 } else 520 amr->amr_numdrives = aex->ae_numldrives; 521 522 for (i = 0; i < amr->amr_numdrives; i++) { 523 amr->amr_drive[i].al_size = 524 le32toh(aex->ae_drivesize[i]); 525 amr->amr_drive[i].al_state = aex->ae_drivestate[i]; 526 amr->amr_drive[i].al_properties = aex->ae_driveprop[i]; 527 } 528 529 free(aex, M_DEVBUF); 530 return (0); 531 } 532 533 /* 534 * Try 8LD extended ENQUIRY to get the controller signature. Once 535 * found, search for a product description. 
536 */ 537 if ((ae = amr_enquire(amr, AMR_CMD_EXT_ENQUIRY2, 0, 0)) != NULL) { 538 i = 0; 539 sig = le32toh(ae->ae_signature); 540 541 while (i < sizeof(amr_typestr) / sizeof(amr_typestr[0])) { 542 if (amr_typestr[i].at_sig == sig) 543 break; 544 i++; 545 } 546 if (i == sizeof(amr_typestr) / sizeof(amr_typestr[0])) { 547 sprintf(buf, "unknown ENQUIRY2 sig (0x%08x)", sig); 548 prodstr = buf; 549 } else 550 prodstr = amr_typestr[i].at_str; 551 } else { 552 if ((ae = amr_enquire(amr, AMR_CMD_ENQUIRY, 0, 0)) == NULL) { 553 aprint_error("%s: unsupported controller\n", 554 amr->amr_dv.dv_xname); 555 return (-1); 556 } 557 558 switch (PCI_PRODUCT(pa->pa_id)) { 559 case PCI_PRODUCT_AMI_MEGARAID: 560 prodstr = "Series 428"; 561 break; 562 case PCI_PRODUCT_AMI_MEGARAID2: 563 prodstr = "Series 434"; 564 break; 565 default: 566 sprintf(buf, "unknown PCI dev (0x%04x)", 567 PCI_PRODUCT(pa->pa_id)); 568 prodstr = buf; 569 break; 570 } 571 } 572 573 aprint_normal("<%s>\n", prodstr); 574 if (intrstr != NULL) 575 aprint_normal("%s: interrupting at %s\n", amr->amr_dv.dv_xname, 576 intrstr); 577 aprint_normal("%s: firmware <%.4s>, BIOS <%.4s>, %dMB RAM\n", 578 amr->amr_dv.dv_xname, ae->ae_adapter.aa_firmware, 579 ae->ae_adapter.aa_bios, ae->ae_adapter.aa_memorysize); 580 581 amr->amr_maxqueuecnt = ae->ae_adapter.aa_maxio; 582 583 /* 584 * Record state of logical drives. 
585 */ 586 if (ae->ae_ldrv.al_numdrives > AMR_MAX_UNITS) { 587 aprint_error("%s: adjust AMR_MAX_UNITS to %d (currently %d)\n", 588 amr->amr_dv.dv_xname, ae->ae_ldrv.al_numdrives, 589 AMR_MAX_UNITS); 590 amr->amr_numdrives = AMR_MAX_UNITS; 591 } else 592 amr->amr_numdrives = ae->ae_ldrv.al_numdrives; 593 594 for (i = 0; i < AMR_MAX_UNITS; i++) { 595 amr->amr_drive[i].al_size = le32toh(ae->ae_ldrv.al_size[i]); 596 amr->amr_drive[i].al_state = ae->ae_ldrv.al_state[i]; 597 amr->amr_drive[i].al_properties = ae->ae_ldrv.al_properties[i]; 598 } 599 600 free(ae, M_DEVBUF); 601 return (0); 602 } 603 604 /* 605 * Flush the internal cache on each configured controller. Called at 606 * shutdown time. 607 */ 608 void 609 amr_shutdown(void *cookie) 610 { 611 extern struct cfdriver amr_cd; 612 struct amr_softc *amr; 613 struct amr_ccb *ac; 614 int i, rv; 615 616 for (i = 0; i < amr_cd.cd_ndevs; i++) { 617 if ((amr = device_lookup(&amr_cd, i)) == NULL) 618 continue; 619 620 if ((rv = amr_ccb_alloc(amr, &ac)) == 0) { 621 ac->ac_mbox.mb_command = AMR_CMD_FLUSH; 622 rv = amr_ccb_poll(amr, ac, 30000); 623 amr_ccb_free(amr, ac); 624 } 625 if (rv != 0) 626 printf("%s: unable to flush cache (%d)\n", 627 amr->amr_dv.dv_xname, rv); 628 } 629 } 630 631 /* 632 * Interrupt service routine. 633 */ 634 int 635 amr_intr(void *cookie) 636 { 637 struct amr_softc *amr; 638 struct amr_ccb *ac; 639 struct amr_mailbox mbox; 640 u_int i, forus, idx; 641 642 amr = cookie; 643 forus = 0; 644 645 while ((*amr->amr_get_work)(amr, &mbox) == 0) { 646 /* Iterate over completed commands in this result. 
*/ 647 for (i = 0; i < mbox.mb_nstatus; i++) { 648 idx = mbox.mb_completed[i] - 1; 649 ac = amr->amr_ccbs + idx; 650 651 if (idx >= amr->amr_maxqueuecnt) { 652 printf("%s: bad status (bogus ID: %u=%u)\n", 653 amr->amr_dv.dv_xname, i, idx); 654 continue; 655 } 656 657 if ((ac->ac_flags & AC_ACTIVE) == 0) { 658 printf("%s: bad status (not active; 0x04%x)\n", 659 amr->amr_dv.dv_xname, ac->ac_flags); 660 continue; 661 } 662 663 ac->ac_status = mbox.mb_status; 664 ac->ac_flags = (ac->ac_flags & ~AC_ACTIVE) | 665 AC_COMPLETE; 666 667 /* Pass notification to upper layers. */ 668 if (ac->ac_handler != NULL) 669 (*ac->ac_handler)(ac); 670 } 671 forus = 1; 672 } 673 674 if (forus) 675 amr_ccb_enqueue(amr, NULL); 676 return (forus); 677 } 678 679 /* 680 * Run a generic enquiry-style command. 681 */ 682 void * 683 amr_enquire(struct amr_softc *amr, u_int8_t cmd, u_int8_t cmdsub, 684 u_int8_t cmdqual) 685 { 686 struct amr_ccb *ac; 687 u_int8_t *mb; 688 void *buf; 689 int rv; 690 691 if (amr_ccb_alloc(amr, &ac) != 0) 692 return (NULL); 693 buf = malloc(AMR_ENQUIRY_BUFSIZE, M_DEVBUF, M_NOWAIT); 694 695 /* Build the command proper. */ 696 mb = (u_int8_t *)&ac->ac_mbox; 697 mb[0] = cmd; 698 mb[2] = cmdsub; 699 mb[3] = cmdqual; 700 701 if ((rv = amr_ccb_map(amr, ac, buf, AMR_ENQUIRY_BUFSIZE, 0)) == 0) { 702 rv = amr_ccb_poll(amr, ac, 2000); 703 amr_ccb_unmap(amr, ac); 704 } 705 706 amr_ccb_free(amr, ac); 707 708 if (rv != 0) { 709 free(buf, M_DEVBUF); 710 buf = NULL; 711 } 712 713 return (buf); 714 } 715 716 /* 717 * Allocate and initialise a CCB. 
718 */ 719 int 720 amr_ccb_alloc(struct amr_softc *amr, struct amr_ccb **acp) 721 { 722 struct amr_ccb *ac; 723 struct amr_mailbox *mb; 724 int s; 725 726 s = splbio(); 727 if ((ac = SLIST_FIRST(&amr->amr_ccb_freelist)) == NULL) { 728 splx(s); 729 return (EAGAIN); 730 } 731 SLIST_REMOVE_HEAD(&amr->amr_ccb_freelist, ac_chain.slist); 732 splx(s); 733 734 ac->ac_handler = NULL; 735 mb = &ac->ac_mbox; 736 *acp = ac; 737 738 memset(mb, 0, sizeof(*mb)); 739 740 mb->mb_ident = ac->ac_ident + 1; 741 mb->mb_busy = 1; 742 mb->mb_poll = 0; 743 mb->mb_ack = 0; 744 745 return (0); 746 } 747 748 /* 749 * Free a CCB. 750 */ 751 void 752 amr_ccb_free(struct amr_softc *amr, struct amr_ccb *ac) 753 { 754 int s; 755 756 ac->ac_flags = 0; 757 758 s = splbio(); 759 SLIST_INSERT_HEAD(&amr->amr_ccb_freelist, ac, ac_chain.slist); 760 splx(s); 761 } 762 763 /* 764 * If a CCB is specified, enqueue it. Pull CCBs off the software queue in 765 * the order that they were enqueued and try to submit their command blocks 766 * to the controller for execution. 767 */ 768 void 769 amr_ccb_enqueue(struct amr_softc *amr, struct amr_ccb *ac) 770 { 771 int s; 772 773 s = splbio(); 774 775 if (ac != NULL) 776 SIMPLEQ_INSERT_TAIL(&amr->amr_ccb_queue, ac, ac_chain.simpleq); 777 778 while ((ac = SIMPLEQ_FIRST(&amr->amr_ccb_queue)) != NULL) { 779 if ((*amr->amr_submit)(amr, ac) != 0) 780 break; 781 SIMPLEQ_REMOVE_HEAD(&amr->amr_ccb_queue, ac_chain.simpleq); 782 } 783 784 splx(s); 785 } 786 787 /* 788 * Map the specified CCB's data buffer onto the bus, and fill the 789 * scatter-gather list. 
790 */ 791 int 792 amr_ccb_map(struct amr_softc *amr, struct amr_ccb *ac, void *data, int size, 793 int out) 794 { 795 struct amr_sgentry *sge; 796 struct amr_mailbox *mb; 797 int nsegs, i, rv, sgloff; 798 bus_dmamap_t xfer; 799 800 xfer = ac->ac_xfer_map; 801 802 rv = bus_dmamap_load(amr->amr_dmat, xfer, data, size, NULL, 803 BUS_DMA_NOWAIT); 804 if (rv != 0) 805 return (rv); 806 807 mb = &ac->ac_mbox; 808 ac->ac_xfer_size = size; 809 ac->ac_flags |= (out ? AC_XFER_OUT : AC_XFER_IN); 810 sgloff = AMR_SGL_SIZE * ac->ac_ident; 811 812 /* We don't need to use a scatter/gather list for just 1 segment. */ 813 nsegs = xfer->dm_nsegs; 814 if (nsegs == 1) { 815 mb->mb_nsgelem = 0; 816 mb->mb_physaddr = htole32(xfer->dm_segs[0].ds_addr); 817 ac->ac_flags |= AC_NOSGL; 818 } else { 819 mb->mb_nsgelem = nsegs; 820 mb->mb_physaddr = htole32(amr->amr_sgls_paddr + sgloff); 821 822 sge = (struct amr_sgentry *)((caddr_t)amr->amr_sgls + sgloff); 823 for (i = 0; i < nsegs; i++, sge++) { 824 sge->sge_addr = htole32(xfer->dm_segs[i].ds_addr); 825 sge->sge_count = htole32(xfer->dm_segs[i].ds_len); 826 } 827 } 828 829 bus_dmamap_sync(amr->amr_dmat, xfer, 0, ac->ac_xfer_size, 830 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 831 832 if ((ac->ac_flags & AC_NOSGL) == 0) 833 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, sgloff, 834 AMR_SGL_SIZE, BUS_DMASYNC_PREWRITE); 835 836 return (0); 837 } 838 839 /* 840 * Unmap the specified CCB's data buffer. 841 */ 842 void 843 amr_ccb_unmap(struct amr_softc *amr, struct amr_ccb *ac) 844 { 845 846 if ((ac->ac_flags & AC_NOSGL) == 0) 847 bus_dmamap_sync(amr->amr_dmat, amr->amr_dmamap, 848 AMR_SGL_SIZE * ac->ac_ident, AMR_SGL_SIZE, 849 BUS_DMASYNC_POSTWRITE); 850 bus_dmamap_sync(amr->amr_dmat, ac->ac_xfer_map, 0, ac->ac_xfer_size, 851 (ac->ac_flags & AC_XFER_IN) != 0 ? 
852 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 853 bus_dmamap_unload(amr->amr_dmat, ac->ac_xfer_map); 854 } 855 856 /* 857 * Submit a command to the controller and poll on completion. Return 858 * non-zero on timeout or error. Must be called with interrupts blocked. 859 */ 860 int 861 amr_ccb_poll(struct amr_softc *amr, struct amr_ccb *ac, int timo) 862 { 863 int rv; 864 865 if ((rv = (*amr->amr_submit)(amr, ac)) != 0) 866 return (rv); 867 868 for (timo *= 10; timo != 0; timo--) { 869 amr_intr(amr); 870 if ((ac->ac_flags & AC_COMPLETE) != 0) 871 break; 872 DELAY(100); 873 } 874 875 return (timo == 0 || ac->ac_status != 0 ? EIO : 0); 876 } 877 878 /* 879 * Wait for the mailbox to become available. 880 */ 881 int 882 amr_mbox_wait(struct amr_softc *amr) 883 { 884 int timo; 885 886 for (timo = 10000; timo != 0; timo--) { 887 if (amr->amr_mbox->mb_busy == 0) 888 break; 889 DELAY(100); 890 } 891 892 #if 0 893 if (timo != 0) 894 printf("%s: controller wedged\n", amr->amr_dv.dv_xname); 895 #endif 896 897 return (timo != 0 ? 0 : EIO); 898 } 899 900 /* 901 * Tell the controller that the mailbox contains a valid command. Must be 902 * called with interrupts blocked. 
 */
int
amr_quartz_submit(struct amr_softc *amr, struct amr_ccb *ac)
{
	u_int32_t v;

	/* The inbound doorbell must be idle before we can post. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & (AMR_QIDB_SUBMIT | AMR_QIDB_ACK)) != 0)
		return (EBUSY);

	/* Copy the command into the shared mailbox. */
	memcpy(amr->amr_mbox, &ac->ac_mbox, sizeof(ac->ac_mbox));

	/* Mark the CCB active and ring the doorbell. */
	ac->ac_flags |= AC_ACTIVE;
	amr_outl(amr, AMR_QREG_IDB, amr->amr_mbox_paddr | AMR_QIDB_SUBMIT);
	DELAY(10);
	return (0);
}

int
amr_std_submit(struct amr_softc *amr, struct amr_ccb *ac)
{

	/* The mailbox must be idle before we can post. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0)
		return (EBUSY);

	/* Copy the command into the shared mailbox. */
	memcpy(amr->amr_mbox, &ac->ac_mbox, sizeof(ac->ac_mbox));

	/* Mark the CCB active and tell the controller to start. */
	ac->ac_flags |= AC_ACTIVE;
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_POST);
	return (0);
}

/*
 * Claim any work that the controller has completed; acknowledge completion,
 * save details of the completion in (mbsave).  Must be called with
 * interrupts blocked.
 */
int
amr_quartz_get_work(struct amr_softc *amr, struct amr_mailbox *mbsave)
{
	u_int32_t v;

	if (amr_mbox_wait(amr))
		return (EBUSY);

	/* A submission or ack still in flight means we can't take work. */
	v = amr_inl(amr, AMR_QREG_IDB);
	if ((v & (AMR_QIDB_SUBMIT | AMR_QIDB_ACK)) != 0)
		return (EBUSY);

	/* Work waiting for us? */
	if (amr_inl(amr, AMR_QREG_ODB) != AMR_QODB_READY)
		return (-1);

	/* Save the mailbox, which contains a list of completed commands. */
	memcpy(mbsave, amr->amr_mbox, sizeof(*mbsave));

	/* Ack the interrupt and mailbox transfer. */
	amr_outl(amr, AMR_QREG_ODB, AMR_QODB_READY);
	amr_outl(amr, AMR_QREG_IDB, amr->amr_mbox_paddr | AMR_QIDB_ACK);
	DELAY(10);

#if 0
	/*
	 * This waits for the controller to notice that we've taken the
	 * command from it.  It's very inefficient, and we shouldn't do it,
	 * but if we remove this code, we stop completing commands under
	 * load.
	 *
	 * Peter J says we shouldn't do this.  The documentation says we
	 * should.  Who is right?
	 */
	while ((amr_inl(amr, AMR_QREG_IDB) & AMR_QIDB_ACK) != 0)
		;
#endif

	return (0);
}

int
amr_std_get_work(struct amr_softc *amr, struct amr_mailbox *mbsave)
{
	u_int8_t istat;

	if (amr_mbox_wait(amr))
		return (EBUSY);

	/* Puke if the mailbox is busy. */
	if ((amr_inb(amr, AMR_SREG_MBOX_BUSY) & AMR_SMBOX_BUSY_FLAG) != 0)
		return (-1);

	/* Check for valid interrupt status. */
	if (((istat = amr_inb(amr, AMR_SREG_INTR)) & AMR_SINTR_VALID) == 0)
		return (-1);

	/* Ack the interrupt. */
	amr_outb(amr, AMR_SREG_INTR, istat);

	/* Save mailbox, which contains a list of completed commands. */
	memcpy(mbsave, amr->amr_mbox, sizeof(*mbsave));

	/* Ack mailbox transfer. */
	amr_outb(amr, AMR_SREG_CMD, AMR_SCMD_ACKINTR);

	return (0);
}