/*	$OpenBSD: mpi.c,v 1.121 2009/11/12 06:20:27 dlg Exp $ */

/*
 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/biovar.h>
#include <dev/ic/mpireg.h>
#include <dev/ic/mpivar.h>

#ifdef MPI_DEBUG
uint32_t			mpi_debug = 0
/*	| MPI_D_CMD */
/*	| MPI_D_INTR */
/*	| MPI_D_MISC */
/*	| MPI_D_DMA */
/*	| MPI_D_IOCTL */
/*	| MPI_D_RW */
/*	| MPI_D_MEM */
/*	| MPI_D_CCB */
/*	| MPI_D_PPR */
/*	| MPI_D_RAID */
/*	| MPI_D_EVT */
	;
#endif

struct cfdriver mpi_cd = {
	NULL,
	"mpi",
	DV_DULL
};

int			mpi_scsi_cmd(struct scsi_xfer *);
void			mpi_scsi_cmd_done(struct mpi_ccb *);
void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
int			mpi_scsi_probe(struct scsi_link *);
int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
			    int, struct proc *);

struct scsi_adapter mpi_switch = {
	mpi_scsi_cmd,
	mpi_minphys,
	mpi_scsi_probe,
	NULL,
	mpi_scsi_ioctl
};

struct scsi_device mpi_dev = {
	NULL,
	NULL,
	NULL,
	NULL
};

struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
void			mpi_dmamem_free(struct mpi_softc *,
			    struct mpi_dmamem *);
int			mpi_alloc_ccbs(struct mpi_softc *);
struct mpi_ccb		*mpi_get_ccb(struct mpi_softc *);
void			mpi_put_ccb(struct mpi_softc *, struct mpi_ccb *);
int			mpi_alloc_replies(struct mpi_softc *);
void			mpi_push_replies(struct mpi_softc *);

void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
int			mpi_complete(struct mpi_softc *, struct mpi_ccb *, int);
int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
int			mpi_reply(struct mpi_softc *, u_int32_t);

int			mpi_cfg_spi_port(struct mpi_softc *);
void			mpi_squash_ppr(struct mpi_softc *);
void			mpi_run_ppr(struct mpi_softc *);
int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
			    struct mpi_cfg_raid_physdisk *, int, int, int);
int			mpi_inq(struct mpi_softc *, u_int16_t, int);

void			mpi_fc_info(struct mpi_softc *);

void			mpi_timeout_xs(void *);
int			mpi_load_xs(struct mpi_ccb *);

u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);
int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);

int			mpi_init(struct mpi_softc *);
int			mpi_reset_soft(struct mpi_softc *);
int			mpi_reset_hard(struct mpi_softc *);

int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
int			mpi_handshake_recv_dword(struct mpi_softc *,
			    u_int32_t *);
int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);

void			mpi_empty_done(struct mpi_ccb *);

int			mpi_iocinit(struct mpi_softc *);
int			mpi_iocfacts(struct mpi_softc *);
int			mpi_portfacts(struct mpi_softc *);
int			mpi_portenable(struct mpi_softc *);
int			mpi_cfg_coalescing(struct mpi_softc *);
void			mpi_get_raid(struct mpi_softc *);
int			mpi_fwupload(struct mpi_softc *);

int			mpi_eventnotify(struct mpi_softc *);
void			mpi_eventnotify_done(struct mpi_ccb *);
void			mpi_eventack(struct mpi_softc *,
			    struct mpi_msg_event_reply *);
void			mpi_eventack_done(struct mpi_ccb *);
void			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);

int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
			    u_int8_t, u_int32_t, int, void *);
int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
			    void *, int, void *, size_t);

#if NBIO > 0
int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
int		mpi_ioctl(struct device *, u_long, caddr_t);
int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
#ifndef SMALL_KERNEL
int		mpi_create_sensors(struct mpi_softc *);
void		mpi_refresh_sensors(void *);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
#define mpi_push_reply(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))

#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

#define MPI_PG_EXTENDED		(1<<0)
#define MPI_PG_POLL		(1<<1)
#define MPI_PG_FMT "\020" "\002POLL" "\001EXTENDED"

#define mpi_cfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL, (_h))
#define mpi_ecfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))

#define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
	    (_h), (_r), (_p), (_l))
#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
	    (_h), (_r), (_p), (_l))

int
mpi_attach(struct mpi_softc *sc)
{
	struct scsibus_attach_args	saa;
	struct mpi_ccb			*ccb;

	printf("\n");

	/* disable interrupts */
	mpi_write(sc, MPI_INTR_MASK,
	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);

	if (mpi_init(sc) != 0) {
		printf("%s: unable to initialise\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_alloc_ccbs(sc) != 0) {
		/* error already printed */
		return (1);
	}

	if (mpi_alloc_replies(sc) != 0) {
		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpi_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* spin until we're operational */
	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_ccbs;
	}

	mpi_push_replies(sc);

	if (mpi_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_cfg_coalescing(sc) != 0) {
		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
		goto free_replies;
	}

	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SAS) {
		if (mpi_eventnotify(sc) != 0) {
			printf("%s: unable to enable events\n", DEVNAME(sc));
			goto free_replies;
		}
	}

	if (mpi_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_fwupload(sc) != 0) {
		printf("%s: unable to upload firmware\n", DEVNAME(sc));
		goto free_replies;
	}

	switch (sc->sc_porttype) {
	case MPI_PORTFACTS_PORTTYPE_SCSI:
		if (mpi_cfg_spi_port(sc) != 0)
			goto free_replies;
		mpi_squash_ppr(sc);
		break;
	case MPI_PORTFACTS_PORTTYPE_FC:
		mpi_fc_info(sc);
		break;
	}

	rw_init(&sc->sc_lock, "mpi_lock");

	/* we should be good to go now, attach scsibus */
	sc->sc_link.device = &mpi_dev;
	sc->sc_link.adapter = &mpi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->sc_target;
	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/* config_found() returns the scsibus attached to us */
	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* get raid pages */
	mpi_get_raid(sc);

	/* do domain validation */
	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_run_ppr(sc);

	/* enable interrupts */
	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

#if NBIO > 0
	if (sc->sc_flags & MPI_F_RAID) {
		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));
		else {
			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
			    2, 0, &sc->sc_cfg_hdr) != 0) {
				printf("%s: can't get IOC page 2 hdr, bio "
				    "disabled\n", DEVNAME(sc));
				goto done;
			}
			sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4,
			    M_TEMP, M_WAITOK | M_CANFAIL);
			if (sc->sc_vol_page == NULL) {
				printf("%s: can't get memory for IOC page 2, "
				    "bio disabled\n", DEVNAME(sc));
				goto done;
			}
			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
			    (sc->sc_vol_page + 1);

			sc->sc_ioctl = mpi_ioctl;
		}
	}
#ifndef SMALL_KERNEL
	mpi_create_sensors(sc);
#endif /* SMALL_KERNEL */
done:
#endif /* NBIO > 0 */

	return (0);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
	mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpi_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}

int
mpi_cfg_spi_port(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_port_pg1	port;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
	    &hdr) != 0)
		return (1);

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
		return (1);

	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
	DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
	DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
	    letoh32(port.on_bus_timer_value));
	DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
	    DEVNAME(sc), port.target_config, letoh16(port.id_config));

	if (port.port_scsi_id == sc->sc_target &&
	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
	    port.on_bus_timer_value != htole32(0x0))
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
	    sc->sc_target);
	port.port_scsi_id = sc->sc_target;
	port.port_resp_ids = htole16(1 << sc->sc_target);
	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */

	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

void
mpi_squash_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_dev_pg1	page;
	int				i;

	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

	for (i = 0; i < sc->sc_buswidth; i++) {
		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
		    1, i, &hdr) != 0)
			return;

		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
			return;

		DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
		    "req_offset: 0x%02x req_period: 0x%02x "
		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
		    page.req_params1, page.req_offset, page.req_period,
		    page.req_params2, letoh32(page.configuration));

		page.req_params1 = 0x0;
		page.req_offset = 0x0;
		page.req_period = 0x0;
		page.req_params2 = 0x0;
		page.configuration = htole32(0x0);

		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
			return;
	}
}

void
mpi_run_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_port_pg0	port_pg;
	struct mpi_cfg_ioc_pg3		*physdisk_pg;
	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
	size_t				pagelen;
	struct scsi_link		*link;
	int				i, tries;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
		    DEVNAME(sc));
		return;
	}

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
		    DEVNAME(sc));
		return;
	}

	for (i = 0; i < sc->sc_buswidth; i++) {
		link = sc->sc_scsibus->sc_link[i][0];
		if (link == NULL)
			continue;

		/* do not ppr volumes */
		if (link->flags & SDEV_VIRTUAL)
			continue;

		tries = 0;
		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

	if ((sc->sc_flags & MPI_F_RAID) == 0)
		return;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc pg 3 header\n", DEVNAME(sc));
		return;
	}

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
	if (physdisk_pg == NULL) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "allocate ioc pg 3\n", DEVNAME(sc));
		return;
	}
	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);

	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc page 3\n", DEVNAME(sc));
		goto out;
	}

	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
	    physdisk_pg->no_phys_disks);

	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
		physdisk = &physdisk_list[i];

		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
		    physdisk->phys_disk_num);

		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
			continue;

		tries = 0;
		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

out:
	free(physdisk_pg, M_TEMP);
}
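
/*
 * mpi_ppr() below negotiates transfer parameters for a single target, or
 * for a RAID physical disk when physdisk is not NULL.  It writes the
 * requested period/offset into SPI device page 1, issues an INQUIRY via
 * mpi_inq() to trigger the negotiation, and then reads device page 0 back
 * to see what was agreed on.  It returns 0 on success, EAGAIN to ask the
 * caller to retry at the next lower speed (try 0 = U320, 1 = U160,
 * 2 = U80), and EIO on any other failure.
 */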

int
mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
    struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
{
	struct mpi_cfg_hdr		hdr0, hdr1;
	struct mpi_cfg_spi_dev_pg0	pg0;
	struct mpi_cfg_spi_dev_pg1	pg1;
	u_int32_t			address;
	int				id;
	int				raid = 0;

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
	    link->quirks);

	if (try >= 3)
		return (EIO);

	if (physdisk == NULL) {
		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
			return (EIO);

		address = link->target;
		id = link->target;
	} else {
		raid = 1;
		address = (physdisk->phys_disk_bus << 8) |
		    (physdisk->phys_disk_id);
		id = physdisk->phys_disk_num;
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
	    address, &hdr0) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
	    address, &hdr1) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

#ifdef MPI_DEBUG
	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
#endif

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	pg1.req_params1 = 0;
	pg1.req_offset = offset;
	pg1.req_period = period;
	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;

	if (raid || !(link->quirks & SDEV_NOSYNC)) {
		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;

		switch (try) {
		case 0: /* U320 */
			break;
		case 1: /* U160 */
			pg1.req_period = 0x09;
			break;
		case 2: /* U80 */
			pg1.req_period = 0x0a;
			break;
		}

		if (pg1.req_period < 0x09) {
			/* Ultra320: enable QAS & PACKETIZED */
			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
		}
		if (pg1.req_period < 0xa) {
			/* >= Ultra160: enable dual xfers */
			pg1.req_params1 |=
			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
		}
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_inq(sc, id, raid) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
		    "target %d\n", DEVNAME(sc), link->target);
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
		    "inquiry\n", DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));

	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if (letoh32(pg0.information) & 0x0e) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
		    DEVNAME(sc), letoh32(pg0.information));
		return (EAGAIN);
	}

	switch (pg0.neg_period) {
	case 0x08:
		period = 160;
		break;
	case 0x09:
		period = 80;
		break;
	case 0x0a:
		period = 40;
		break;
	case 0x0b:
		period = 20;
		break;
	case 0x0c:
		period = 10;
		break;
	default:
		period = 0;
		break;
	}

	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
	    id, period ? "Sync" : "Async", period,
	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
	    pg0.neg_offset,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);

	return (0);
}

int
mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
{
	struct mpi_ccb			*ccb;
	struct scsi_inquiry		inq;
	struct {
		struct mpi_msg_scsi_io		io;
		struct mpi_sge			sge;
		struct scsi_inquiry_data	inqbuf;
		struct scsi_sense_data		sense;
	} __packed			*bundle;
	struct mpi_msg_scsi_io		*io;
	struct mpi_sge			*sge;
	u_int64_t			addr;

	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));

	bzero(&inq, sizeof(inq));
	inq.opcode = INQUIRY;
	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);

	ccb = mpi_get_ccb(sc);
	if (ccb == NULL)
		return (1);

	ccb->ccb_done = mpi_empty_done;

	bundle = ccb->ccb_cmd;
	io = &bundle->io;
	sge = &bundle->sge;

	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
	    MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = target;

	io->cdb_length = sizeof(inq);
	io->sense_buf_len = sizeof(struct scsi_sense_data);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	/*
	 * always lun 0
	 * io->lun[0] = htobe16(link->lun);
	 */

	io->direction = MPI_SCSIIO_DIR_READ;
	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;

	bcopy(&inq, io->cdb, sizeof(inq));

	io->data_length = htole32(sizeof(struct scsi_inquiry_data));

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));

	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
	    (u_int32_t)sizeof(inq));

	addr = ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle);
	sge->sg_hi_addr = htole32((u_int32_t)(addr >> 32));
	sge->sg_lo_addr = htole32((u_int32_t)addr);

	if (mpi_poll(sc, ccb, 5000) != 0)
		return (1);

	if (ccb->ccb_rcb != NULL)
		mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva);

	mpi_put_ccb(sc, ccb);

	return (0);
}

void
mpi_fc_info(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_fc_port_pg0	pg;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_fc_info unable to fetch "
		    "FC port header 0\n", DEVNAME(sc));
		return;
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_fc_info unable to fetch "
		    "FC port page 0\n",
		    DEVNAME(sc));
		return;
	}

	sc->sc_link.port_wwn = letoh64(pg.wwpn);
	sc->sc_link.node_wwn = letoh64(pg.wwnn);
}

void
mpi_detach(struct mpi_softc *sc)
{

}

int
mpi_intr(void *arg)
{
	struct mpi_softc		*sc = arg;
	u_int32_t			reg;
	int				rv = 0;

	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
		mpi_reply(sc, reg);
		rv = 1;
	}

	return (rv);
}

int
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb			*ccb;
	struct mpi_rcb			*rcb = NULL;
	struct mpi_msg_reply		*reply = NULL;
	u_int32_t
reply_dva; 840 int id; 841 int i; 842 843 DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg); 844 845 if (reg & MPI_REPLY_QUEUE_ADDRESS) { 846 bus_dmamap_sync(sc->sc_dmat, 847 MPI_DMA_MAP(sc->sc_replies), 0, sc->sc_repq * 848 MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD); 849 850 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1; 851 852 i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) / 853 MPI_REPLY_SIZE; 854 rcb = &sc->sc_rcbs[i]; 855 reply = rcb->rcb_reply; 856 857 id = letoh32(reply->msg_context); 858 859 bus_dmamap_sync(sc->sc_dmat, 860 MPI_DMA_MAP(sc->sc_replies), 0, sc->sc_repq * 861 MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 862 } else { 863 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) { 864 case MPI_REPLY_QUEUE_TYPE_INIT: 865 id = reg & MPI_REPLY_QUEUE_CONTEXT; 866 break; 867 868 default: 869 panic("%s: unsupported context reply\n", 870 DEVNAME(sc)); 871 } 872 } 873 874 DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n", 875 DEVNAME(sc), id, reply); 876 877 ccb = &sc->sc_ccbs[id]; 878 879 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 880 ccb->ccb_offset, MPI_REQUEST_SIZE, 881 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 882 ccb->ccb_state = MPI_CCB_READY; 883 ccb->ccb_rcb = rcb; 884 885 ccb->ccb_done(ccb); 886 887 return (id); 888 } 889 890 struct mpi_dmamem * 891 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size) 892 { 893 struct mpi_dmamem *mdm; 894 int nsegs; 895 896 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO); 897 if (mdm == NULL) 898 return (NULL); 899 900 mdm->mdm_size = size; 901 902 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 903 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0) 904 goto mdmfree; 905 906 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg, 907 1, &nsegs, BUS_DMA_NOWAIT) != 0) 908 goto destroy; 909 910 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size, 911 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0) 912 goto free; 913 914 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size, 915 NULL, BUS_DMA_NOWAIT) != 0) 916 goto unmap; 917 918 bzero(mdm->mdm_kva, size); 919 920 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x " 921 "map: %#x nsegs: %d segs: %#x kva: %x\n", 922 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva); 923 924 return (mdm); 925 926 unmap: 927 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size); 928 free: 929 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 930 destroy: 931 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 932 mdmfree: 933 free(mdm, M_DEVBUF); 934 935 return (NULL); 936 } 937 938 void 939 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm) 940 { 941 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm); 942 943 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map); 944 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size); 945 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 946 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 947 free(mdm, M_DEVBUF); 948 } 949 950 int 951 mpi_alloc_ccbs(struct mpi_softc *sc) 952 { 953 struct mpi_ccb *ccb; 954 u_int8_t *cmd; 955 int i; 956 957 TAILQ_INIT(&sc->sc_ccb_free); 958 959 sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds, 960 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO); 961 if (sc->sc_ccbs == NULL) { 962 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 963 return (1); 964 } 965 966 sc->sc_requests = mpi_dmamem_alloc(sc, 967 MPI_REQUEST_SIZE * sc->sc_maxcmds); 968 if (sc->sc_requests == NULL) { 969 printf("%s: unable to allocate ccb 
dmamem\n", DEVNAME(sc)); 970 goto free_ccbs; 971 } 972 cmd = MPI_DMA_KVA(sc->sc_requests); 973 bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds); 974 975 for (i = 0; i < sc->sc_maxcmds; i++) { 976 ccb = &sc->sc_ccbs[i]; 977 978 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 979 sc->sc_max_sgl_len, MAXPHYS, 0, 980 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 981 &ccb->ccb_dmamap) != 0) { 982 printf("%s: unable to create dma map\n", DEVNAME(sc)); 983 goto free_maps; 984 } 985 986 ccb->ccb_sc = sc; 987 ccb->ccb_id = i; 988 ccb->ccb_offset = MPI_REQUEST_SIZE * i; 989 990 ccb->ccb_cmd = &cmd[ccb->ccb_offset]; 991 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) + 992 ccb->ccb_offset; 993 994 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x " 995 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n", 996 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc, 997 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd, 998 ccb->ccb_cmd_dva); 999 1000 mpi_put_ccb(sc, ccb); 1001 } 1002 1003 return (0); 1004 1005 free_maps: 1006 while ((ccb = mpi_get_ccb(sc)) != NULL) 1007 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 1008 1009 mpi_dmamem_free(sc, sc->sc_requests); 1010 free_ccbs: 1011 free(sc->sc_ccbs, M_DEVBUF); 1012 1013 return (1); 1014 } 1015 1016 struct mpi_ccb * 1017 mpi_get_ccb(struct mpi_softc *sc) 1018 { 1019 struct mpi_ccb *ccb; 1020 1021 ccb = TAILQ_FIRST(&sc->sc_ccb_free); 1022 if (ccb == NULL) { 1023 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb == NULL\n", DEVNAME(sc)); 1024 return (NULL); 1025 } 1026 1027 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link); 1028 1029 ccb->ccb_state = MPI_CCB_READY; 1030 1031 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %#x\n", DEVNAME(sc), ccb); 1032 1033 return (ccb); 1034 } 1035 1036 void 1037 mpi_put_ccb(struct mpi_softc *sc, struct mpi_ccb *ccb) 1038 { 1039 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %#x\n", DEVNAME(sc), ccb); 1040 1041 ccb->ccb_state = MPI_CCB_FREE; 1042 ccb->ccb_xs = NULL; 1043 ccb->ccb_done = NULL; 1044 bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE); 1045 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link); 1046 } 1047 1048 int 1049 mpi_alloc_replies(struct mpi_softc *sc) 1050 { 1051 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc)); 1052 1053 sc->sc_rcbs = malloc(sc->sc_repq * sizeof(struct mpi_rcb), M_DEVBUF, 1054 M_WAITOK|M_CANFAIL); 1055 if (sc->sc_rcbs == NULL) 1056 return (1); 1057 1058 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE); 1059 if (sc->sc_replies == NULL) { 1060 free(sc->sc_rcbs, M_DEVBUF); 1061 return (1); 1062 } 1063 1064 return (0); 1065 } 1066 1067 void 1068 mpi_push_replies(struct mpi_softc *sc) 1069 { 1070 struct mpi_rcb *rcb; 1071 char *kva = MPI_DMA_KVA(sc->sc_replies); 1072 int i; 1073 1074 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0, 1075 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1076 1077 for (i = 0; i < sc->sc_repq; i++) { 1078 rcb = &sc->sc_rcbs[i]; 1079 1080 rcb->rcb_reply = kva + MPI_REPLY_SIZE * i; 1081 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) + 1082 MPI_REPLY_SIZE * i; 1083 mpi_push_reply(sc, rcb->rcb_reply_dva); 1084 } 1085 } 1086 1087 void 1088 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb) 1089 { 1090 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc), 1091 ccb->ccb_cmd_dva); 1092 1093 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 1094 ccb->ccb_offset, MPI_REQUEST_SIZE, 1095 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1096 1097 ccb->ccb_state = MPI_CCB_QUEUED; 1098 mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva); 1099 } 1100 1101 int 1102 
mpi_complete(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout) 1103 { 1104 u_int32_t reg; 1105 int id = -1; 1106 1107 DNPRINTF(MPI_D_INTR, "%s: mpi_complete timeout %d\n", DEVNAME(sc), 1108 timeout); 1109 1110 do { 1111 reg = mpi_pop_reply(sc); 1112 if (reg == 0xffffffff) { 1113 if (timeout-- == 0) 1114 return (1); 1115 1116 delay(1000); 1117 continue; 1118 } 1119 1120 id = mpi_reply(sc, reg); 1121 1122 } while (ccb->ccb_id != id); 1123 1124 return (0); 1125 } 1126 1127 int 1128 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout) 1129 { 1130 int error; 1131 int s; 1132 1133 DNPRINTF(MPI_D_CMD, "%s: mpi_poll\n", DEVNAME(sc)); 1134 1135 s = splbio(); 1136 mpi_start(sc, ccb); 1137 error = mpi_complete(sc, ccb, timeout); 1138 splx(s); 1139 1140 return (error); 1141 } 1142 1143 int 1144 mpi_scsi_cmd(struct scsi_xfer *xs) 1145 { 1146 struct scsi_link *link = xs->sc_link; 1147 struct mpi_softc *sc = link->adapter_softc; 1148 struct mpi_ccb *ccb; 1149 struct mpi_ccb_bundle *mcb; 1150 struct mpi_msg_scsi_io *io; 1151 int s; 1152 1153 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc)); 1154 1155 if (xs->cmdlen > MPI_CDB_LEN) { 1156 DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n", 1157 DEVNAME(sc), xs->cmdlen); 1158 bzero(&xs->sense, sizeof(xs->sense)); 1159 xs->sense.error_code = SSD_ERRCODE_VALID | 0x70; 1160 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 1161 xs->sense.add_sense_code = 0x20; 1162 xs->error = XS_SENSE; 1163 xs->flags |= ITSDONE; 1164 s = splbio(); 1165 scsi_done(xs); 1166 splx(s); 1167 return (COMPLETE); 1168 } 1169 1170 s = splbio(); 1171 ccb = mpi_get_ccb(sc); 1172 splx(s); 1173 if (ccb == NULL) 1174 return (NO_CCB); 1175 1176 DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n", 1177 DEVNAME(sc), ccb->ccb_id, xs->flags); 1178 1179 ccb->ccb_xs = xs; 1180 ccb->ccb_done = mpi_scsi_cmd_done; 1181 1182 mcb = ccb->ccb_cmd; 1183 io = &mcb->mcb_io; 1184 1185 io->function = MPI_FUNCTION_SCSI_IO_REQUEST; 1186 /* 1187 * bus is always 0 1188 * io->bus = htole16(sc->sc_bus); 1189 */ 1190 io->target_id = link->target; 1191 1192 io->cdb_length = xs->cmdlen; 1193 io->sense_buf_len = sizeof(xs->sense); 1194 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 1195 1196 io->msg_context = htole32(ccb->ccb_id); 1197 1198 io->lun[0] = htobe16(link->lun); 1199 1200 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1201 case SCSI_DATA_IN: 1202 io->direction = MPI_SCSIIO_DIR_READ; 1203 break; 1204 case SCSI_DATA_OUT: 1205 io->direction = MPI_SCSIIO_DIR_WRITE; 1206 break; 1207 default: 1208 io->direction = MPI_SCSIIO_DIR_NONE; 1209 break; 1210 } 1211 1212 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI && 1213 (link->quirks & SDEV_NOTAGS)) 1214 io->tagging = MPI_SCSIIO_ATTR_UNTAGGED; 1215 else 1216 io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q; 1217 1218 bcopy(xs->cmd, io->cdb, xs->cmdlen); 1219 1220 io->data_length = htole32(xs->datalen); 1221 1222 io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva + 1223 ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb)); 1224 1225 if (mpi_load_xs(ccb) != 0) { 1226 xs->error = XS_DRIVER_STUFFUP; 1227 xs->flags |= ITSDONE; 1228 s = splbio(); 1229 mpi_put_ccb(sc, ccb); 1230 scsi_done(xs); 1231 splx(s); 1232 return (COMPLETE); 1233 } 1234 1235 timeout_set(&xs->stimeout, mpi_timeout_xs, ccb); 1236 1237 if (xs->flags & SCSI_POLL) { 1238 if (mpi_poll(sc, ccb, xs->timeout) != 0) { 1239 xs->error = XS_DRIVER_STUFFUP; 1240 xs->flags |= ITSDONE; 1241 s = splbio(); 1242 scsi_done(xs); 1243 splx(s); 1244 } 1245 return (COMPLETE); 1246 } 1247 1248 s = splbio(); 1249 
mpi_start(sc, ccb); 1250 splx(s); 1251 return (SUCCESSFULLY_QUEUED); 1252 } 1253 1254 void 1255 mpi_scsi_cmd_done(struct mpi_ccb *ccb) 1256 { 1257 struct mpi_softc *sc = ccb->ccb_sc; 1258 struct scsi_xfer *xs = ccb->ccb_xs; 1259 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1260 bus_dmamap_t dmap = ccb->ccb_dmamap; 1261 struct mpi_msg_scsi_io_error *sie; 1262 1263 if (xs->datalen != 0) { 1264 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1265 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD : 1266 BUS_DMASYNC_POSTWRITE); 1267 1268 bus_dmamap_unload(sc->sc_dmat, dmap); 1269 } 1270 1271 /* timeout_del */ 1272 xs->error = XS_NOERROR; 1273 xs->resid = 0; 1274 xs->flags |= ITSDONE; 1275 1276 if (ccb->ccb_rcb == NULL) { 1277 /* no scsi error, we're ok so drop out early */ 1278 xs->status = SCSI_OK; 1279 mpi_put_ccb(sc, ccb); 1280 scsi_done(xs); 1281 return; 1282 } 1283 1284 sie = ccb->ccb_rcb->rcb_reply; 1285 1286 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d " 1287 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen, 1288 xs->flags); 1289 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d " 1290 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus, 1291 sie->msg_length, sie->function); 1292 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d " 1293 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length, 1294 sie->sense_buf_len, sie->msg_flags); 1295 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1296 letoh32(sie->msg_context)); 1297 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x " 1298 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status, 1299 sie->scsi_state, letoh16(sie->ioc_status)); 1300 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1301 letoh32(sie->ioc_loginfo)); 1302 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc), 1303 letoh32(sie->transfer_count)); 1304 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc), 1305 letoh32(sie->sense_count)); 1306 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc), 1307 letoh32(sie->response_info)); 1308 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc), 1309 letoh16(sie->tag)); 1310 1311 xs->status = sie->scsi_status; 1312 switch (letoh16(sie->ioc_status)) { 1313 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 1314 xs->resid = xs->datalen - letoh32(sie->transfer_count); 1315 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) { 1316 xs->error = XS_DRIVER_STUFFUP; 1317 break; 1318 } 1319 /* FALLTHROUGH */ 1320 case MPI_IOCSTATUS_SUCCESS: 1321 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 1322 switch (xs->status) { 1323 case SCSI_OK: 1324 xs->resid = 0; 1325 break; 1326 1327 case SCSI_CHECK: 1328 xs->error = XS_SENSE; 1329 break; 1330 1331 case SCSI_BUSY: 1332 case SCSI_QUEUE_FULL: 1333 xs->error = XS_BUSY; 1334 break; 1335 1336 default: 1337 xs->error = XS_DRIVER_STUFFUP; 1338 break; 1339 } 1340 break; 1341 1342 case MPI_IOCSTATUS_BUSY: 1343 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 1344 xs->error = XS_BUSY; 1345 break; 1346 1347 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 1348 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 1349 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 1350 xs->error = XS_SELTIMEOUT; 1351 break; 1352 1353 default: 1354 xs->error = XS_DRIVER_STUFFUP; 1355 break; 1356 } 1357 1358 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID) 1359 bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense)); 1360 1361 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc), 1362 xs->error, xs->status); 1363 1364 
mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 1365 mpi_put_ccb(sc, ccb); 1366 scsi_done(xs); 1367 } 1368 1369 void 1370 mpi_timeout_xs(void *arg) 1371 { 1372 /* XXX */ 1373 } 1374 1375 int 1376 mpi_load_xs(struct mpi_ccb *ccb) 1377 { 1378 struct mpi_softc *sc = ccb->ccb_sc; 1379 struct scsi_xfer *xs = ccb->ccb_xs; 1380 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1381 struct mpi_msg_scsi_io *io = &mcb->mcb_io; 1382 struct mpi_sge *sge, *nsge = &mcb->mcb_sgl[0]; 1383 struct mpi_sge *ce = NULL, *nce; 1384 u_int64_t ce_dva; 1385 bus_dmamap_t dmap = ccb->ccb_dmamap; 1386 u_int32_t addr, flags; 1387 int i, error; 1388 1389 if (xs->datalen == 0) { 1390 nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 1391 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 1392 return (0); 1393 } 1394 1395 error = bus_dmamap_load(sc->sc_dmat, dmap, 1396 xs->data, xs->datalen, NULL, 1397 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 1398 if (error) { 1399 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 1400 return (1); 1401 } 1402 1403 flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64; 1404 if (xs->flags & SCSI_DATA_OUT) 1405 flags |= MPI_SGE_FL_DIR_OUT; 1406 1407 if (dmap->dm_nsegs > sc->sc_first_sgl_len) { 1408 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1]; 1409 io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4; 1410 } 1411 1412 for (i = 0; i < dmap->dm_nsegs; i++) { 1413 1414 if (nsge == ce) { 1415 nsge++; 1416 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST); 1417 1418 DNPRINTF(MPI_D_DMA, "%s: - 0x%08x 0x%08x 0x%08x\n", 1419 DEVNAME(sc), sge->sg_hdr, 1420 sge->sg_hi_addr, sge->sg_lo_addr); 1421 1422 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) { 1423 nce = &nsge[sc->sc_chain_len - 1]; 1424 addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4; 1425 addr = addr << 16 | 1426 sizeof(struct mpi_sge) * sc->sc_chain_len; 1427 } else { 1428 nce = NULL; 1429 addr = sizeof(struct mpi_sge) * 1430 (dmap->dm_nsegs - i); 1431 } 1432 1433 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN | 1434 MPI_SGE_FL_SIZE_64 | addr); 1435 1436 ce_dva = ccb->ccb_cmd_dva + 1437 ((u_int8_t *)nsge - (u_int8_t *)mcb); 1438 1439 addr = (u_int32_t)(ce_dva >> 32); 1440 ce->sg_hi_addr = htole32(addr); 1441 addr = (u_int32_t)ce_dva; 1442 ce->sg_lo_addr = htole32(addr); 1443 1444 DNPRINTF(MPI_D_DMA, "%s: ce: 0x%08x 0x%08x 0x%08x\n", 1445 DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr, 1446 ce->sg_lo_addr); 1447 1448 ce = nce; 1449 } 1450 1451 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc), 1452 i, dmap->dm_segs[i].ds_len, 1453 (u_int64_t)dmap->dm_segs[i].ds_addr); 1454 1455 sge = nsge; 1456 1457 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len); 1458 addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32); 1459 sge->sg_hi_addr = htole32(addr); 1460 addr = (u_int32_t)dmap->dm_segs[i].ds_addr; 1461 sge->sg_lo_addr = htole32(addr); 1462 1463 DNPRINTF(MPI_D_DMA, "%s: %d: 0x%08x 0x%08x 0x%08x\n", 1464 DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr, 1465 sge->sg_lo_addr); 1466 1467 nsge = sge + 1; 1468 } 1469 1470 /* terminate list */ 1471 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 1472 MPI_SGE_FL_EOL); 1473 1474 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1475 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_PREREAD : 1476 BUS_DMASYNC_PREWRITE); 1477 1478 return (0); 1479 } 1480 1481 void 1482 mpi_minphys(struct buf *bp, struct scsi_link *sl) 1483 { 1484 /* XXX */ 1485 if (bp->b_bcount > MAXPHYS) 1486 bp->b_bcount = MAXPHYS; 1487 minphys(bp); 1488 } 1489 1490 int 1491 mpi_scsi_probe(struct scsi_link *link) 1492 { 1493 struct mpi_softc *sc = link->adapter_softc; 1494 struct mpi_ecfg_hdr ehdr; 1495 struct mpi_cfg_sas_dev_pg0 pg0; 1496 u_int32_t address; 1497 1498 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS) 1499 return (0); 1500 1501 address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target; 1502 1503 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0, 1504 address, &ehdr) != 0) 1505 return (EIO); 1506 1507 if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0) 1508 return (0); 1509 1510 DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n", 1511 DEVNAME(sc), link->target); 1512 DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n", 1513 DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle)); 1514 DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc), 1515 letoh64(pg0.sas_addr)); 1516 DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x " 1517 "access_status: 0x%02x\n", DEVNAME(sc), 1518 letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status); 1519 DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x " 1520 "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc), 1521 letoh16(pg0.dev_handle), pg0.bus, pg0.target); 1522 DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc), 1523 letoh32(pg0.device_info)); 1524 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n", 1525 DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port); 1526 1527 if (ISSET(letoh32(pg0.device_info), 1528 MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) { 1529 DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n", 1530 DEVNAME(sc), link->target); 1531 link->flags |= SDEV_ATAPI; 1532 link->quirks |= SDEV_ONLYBIG; 1533 } 1534 1535 return (0); 1536 } 1537 1538 u_int32_t 1539 mpi_read(struct mpi_softc *sc, bus_size_t r) 1540 { 1541 u_int32_t rv; 1542 1543 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1544 BUS_SPACE_BARRIER_READ); 1545 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 1546 1547 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv); 1548 1549 return (rv); 1550 } 1551 1552 void 1553 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v) 1554 { 1555 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v); 1556 1557 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 1558 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1559 BUS_SPACE_BARRIER_WRITE); 1560 } 1561 1562 int 1563 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1564 u_int32_t target) 1565 { 1566 int i; 1567 1568 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r, 1569 mask, target); 1570 1571 for (i = 0; i < 10000; i++) { 1572 if ((mpi_read(sc, r) & mask) == target) 1573 return (0); 1574 delay(1000); 1575 } 1576 1577 return (1); 1578 } 1579 1580 int 1581 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1582 u_int32_t target) 1583 { 1584 int i; 1585 1586 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r, 1587 mask, target); 1588 1589 for (i = 0; i < 10000; i++) { 1590 if ((mpi_read(sc, r) & mask) != target) 1591 return (0); 1592 delay(1000); 1593 } 1594 1595 return (1); 1596 } 1597 1598 int 1599 mpi_init(struct mpi_softc *sc) 1600 { 1601 u_int32_t db; 1602 int i; 1603 1604 /* spin until the 
IOC leaves the RESET state */ 1605 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1606 MPI_DOORBELL_STATE_RESET) != 0) { 1607 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave " 1608 "reset state\n", DEVNAME(sc)); 1609 return (1); 1610 } 1611 1612 /* check current ownership */ 1613 db = mpi_read_db(sc); 1614 if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) { 1615 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n", 1616 DEVNAME(sc)); 1617 return (0); 1618 } 1619 1620 for (i = 0; i < 5; i++) { 1621 switch (db & MPI_DOORBELL_STATE) { 1622 case MPI_DOORBELL_STATE_READY: 1623 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n", 1624 DEVNAME(sc)); 1625 return (0); 1626 1627 case MPI_DOORBELL_STATE_OPER: 1628 case MPI_DOORBELL_STATE_FAULT: 1629 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being " 1630 "reset\n" , DEVNAME(sc)); 1631 if (mpi_reset_soft(sc) != 0) 1632 mpi_reset_hard(sc); 1633 break; 1634 1635 case MPI_DOORBELL_STATE_RESET: 1636 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come " 1637 "out of reset\n", DEVNAME(sc)); 1638 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1639 MPI_DOORBELL_STATE_RESET) != 0) 1640 return (1); 1641 break; 1642 } 1643 db = mpi_read_db(sc); 1644 } 1645 1646 return (1); 1647 } 1648 1649 int 1650 mpi_reset_soft(struct mpi_softc *sc) 1651 { 1652 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc)); 1653 1654 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1655 return (1); 1656 1657 mpi_write_db(sc, 1658 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)); 1659 if (mpi_wait_eq(sc, MPI_INTR_STATUS, 1660 MPI_INTR_STATUS_IOCDOORBELL, 0) != 0) 1661 return (1); 1662 1663 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1664 MPI_DOORBELL_STATE_READY) != 0) 1665 return (1); 1666 1667 return (0); 1668 } 1669 1670 int 1671 mpi_reset_hard(struct mpi_softc *sc) 1672 { 1673 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc)); 1674 1675 /* enable diagnostic register */ 1676 mpi_write(sc, MPI_WRITESEQ, 0xff); 1677 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1); 1678 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2); 1679 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3); 1680 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4); 1681 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5); 1682 1683 /* reset ioc */ 1684 mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER); 1685 1686 delay(10000); 1687 1688 /* disable diagnostic register */ 1689 mpi_write(sc, MPI_WRITESEQ, 0xff); 1690 1691 /* restore pci bits? */ 1692 1693 /* firmware bits? */ 1694 return (0); 1695 } 1696 1697 int 1698 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords) 1699 { 1700 u_int32_t *query = buf; 1701 int i; 1702 1703 /* make sure the doorbell is not in use. */ 1704 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1705 return (1); 1706 1707 /* clear pending doorbell interrupts */ 1708 if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL) 1709 mpi_write_intr(sc, 0); 1710 1711 /* 1712 * first write the doorbell with the handshake function and the 1713 * dword count. 1714 */ 1715 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) | 1716 MPI_DOORBELL_DWORDS(dwords)); 1717 1718 /* 1719 * the doorbell used bit will be set because a doorbell function has 1720 * started. Wait for the interrupt and then ack it. 1721 */ 1722 if (mpi_wait_db_int(sc) != 0) 1723 return (1); 1724 mpi_write_intr(sc, 0); 1725 1726 /* poll for the acknowledgement. */ 1727 if (mpi_wait_db_ack(sc) != 0) 1728 return (1); 1729 1730 /* write the query through the doorbell. 
*/ 1731 for (i = 0; i < dwords; i++) { 1732 mpi_write_db(sc, htole32(query[i])); 1733 if (mpi_wait_db_ack(sc) != 0) 1734 return (1); 1735 } 1736 1737 return (0); 1738 } 1739 1740 int 1741 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword) 1742 { 1743 u_int16_t *words = (u_int16_t *)dword; 1744 int i; 1745 1746 for (i = 0; i < 2; i++) { 1747 if (mpi_wait_db_int(sc) != 0) 1748 return (1); 1749 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK); 1750 mpi_write_intr(sc, 0); 1751 } 1752 1753 return (0); 1754 } 1755 1756 int 1757 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords) 1758 { 1759 struct mpi_msg_reply *reply = buf; 1760 u_int32_t *dbuf = buf, dummy; 1761 int i; 1762 1763 /* get the first dword so we can read the length out of the header. */ 1764 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0) 1765 return (1); 1766 1767 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n", 1768 DEVNAME(sc), dwords, reply->msg_length); 1769 1770 /* 1771 * the total length, in dwords, is in the message length field of the 1772 * reply header. 1773 */ 1774 for (i = 1; i < MIN(dwords, reply->msg_length); i++) { 1775 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0) 1776 return (1); 1777 } 1778 1779 /* if there's extra stuff to come off the ioc, discard it */ 1780 while (i++ < reply->msg_length) { 1781 if (mpi_handshake_recv_dword(sc, &dummy) != 0) 1782 return (1); 1783 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: " 1784 "0x%08x\n", DEVNAME(sc), dummy); 1785 } 1786 1787 /* wait for the doorbell used bit to be reset and clear the intr */ 1788 if (mpi_wait_db_int(sc) != 0) 1789 return (1); 1790 mpi_write_intr(sc, 0); 1791 1792 return (0); 1793 } 1794 1795 void 1796 mpi_empty_done(struct mpi_ccb *ccb) 1797 { 1798 /* nothing to do */ 1799 } 1800 1801 int 1802 mpi_iocfacts(struct mpi_softc *sc) 1803 { 1804 struct mpi_msg_iocfacts_request ifq; 1805 struct mpi_msg_iocfacts_reply ifp; 1806 1807 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc)); 1808 1809 bzero(&ifq, sizeof(ifq)); 1810 bzero(&ifp, sizeof(ifp)); 1811 1812 ifq.function = MPI_FUNCTION_IOC_FACTS; 1813 ifq.chain_offset = 0; 1814 ifq.msg_flags = 0; 1815 ifq.msg_context = htole32(0xdeadbeef); 1816 1817 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 1818 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n", 1819 DEVNAME(sc)); 1820 return (1); 1821 } 1822 1823 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 1824 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n", 1825 DEVNAME(sc)); 1826 return (1); 1827 } 1828 1829 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n", 1830 DEVNAME(sc), ifp.function, ifp.msg_length, 1831 ifp.msg_version_maj, ifp.msg_version_min); 1832 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x " 1833 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags, 1834 ifp.ioc_number, ifp.header_version_maj, 1835 ifp.header_version_min); 1836 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc), 1837 letoh32(ifp.msg_context)); 1838 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n", 1839 DEVNAME(sc), letoh16(ifp.ioc_status), 1840 letoh16(ifp.ioc_exceptions)); 1841 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc), 1842 letoh32(ifp.ioc_loginfo)); 1843 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x " 1844 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags, 1845 ifp.block_size, ifp.whoinit, ifp.max_chain_depth); 1846 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n", 1847 
DEVNAME(sc), letoh16(ifp.request_frame_size), 1848 letoh16(ifp.reply_queue_depth)); 1849 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc), 1850 letoh16(ifp.product_id)); 1851 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc), 1852 letoh32(ifp.current_host_mfa_hi_addr)); 1853 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d " 1854 "global_credits: %d\n", 1855 DEVNAME(sc), ifp.event_state, ifp.number_of_ports, 1856 letoh16(ifp.global_credits)); 1857 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc), 1858 letoh32(ifp.current_sense_buffer_hi_addr)); 1859 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n", 1860 DEVNAME(sc), ifp.max_buses, ifp.max_devices, 1861 letoh16(ifp.current_reply_frame_size)); 1862 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc), 1863 letoh32(ifp.fw_image_size)); 1864 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc), 1865 letoh32(ifp.ioc_capabilities)); 1866 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x " 1867 "fw_version_dev: 0x%02x\n", DEVNAME(sc), 1868 ifp.fw_version_maj, ifp.fw_version_min, 1869 ifp.fw_version_unit, ifp.fw_version_dev); 1870 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n", 1871 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth)); 1872 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x " 1873 "addr 0x%08x %08x\n", DEVNAME(sc), 1874 letoh32(ifp.host_page_buffer_sge.sg_hdr), 1875 letoh32(ifp.host_page_buffer_sge.sg_hi_addr), 1876 letoh32(ifp.host_page_buffer_sge.sg_lo_addr)); 1877 1878 sc->sc_maxcmds = letoh16(ifp.global_credits); 1879 sc->sc_maxchdepth = ifp.max_chain_depth; 1880 sc->sc_ioc_number = ifp.ioc_number; 1881 if (sc->sc_flags & MPI_F_SPI) 1882 sc->sc_buswidth = 16; 1883 else 1884 sc->sc_buswidth = 1885 (ifp.max_devices == 0) ? 256 : ifp.max_devices; 1886 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 1887 sc->sc_fw_len = letoh32(ifp.fw_image_size); 1888 1889 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, letoh16(ifp.reply_queue_depth)); 1890 1891 /* 1892 * you can fit sg elements on the end of the io cmd if they fit in the 1893 * request frame size. 1894 */ 1895 sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) - 1896 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge); 1897 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc), 1898 sc->sc_first_sgl_len); 1899 1900 sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) / 1901 sizeof(struct mpi_sge); 1902 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc), 1903 sc->sc_chain_len); 1904 1905 /* the sgl tailing the io cmd loses an entry to the chain element. */ 1906 sc->sc_max_sgl_len = MPI_MAX_SGL - 1; 1907 /* the sgl chains lose an entry for each chain element */ 1908 sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) / 1909 sc->sc_chain_len; 1910 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc), 1911 sc->sc_max_sgl_len); 1912 1913 /* XXX we're ignoring the max chain depth */ 1914 1915 return (0); 1916 } 1917 1918 int 1919 mpi_iocinit(struct mpi_softc *sc) 1920 { 1921 struct mpi_msg_iocinit_request iiq; 1922 struct mpi_msg_iocinit_reply iip; 1923 u_int32_t hi_addr; 1924 1925 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc)); 1926 1927 bzero(&iiq, sizeof(iiq)); 1928 bzero(&iip, sizeof(iip)); 1929 1930 iiq.function = MPI_FUNCTION_IOC_INIT; 1931 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER; 1932 1933 iiq.max_devices = (sc->sc_buswidth == 256) ? 
0 : sc->sc_buswidth; 1934 iiq.max_buses = 1; 1935 1936 iiq.msg_context = htole32(0xd00fd00f); 1937 1938 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE); 1939 1940 hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32); 1941 iiq.host_mfa_hi_addr = htole32(hi_addr); 1942 iiq.sense_buffer_hi_addr = htole32(hi_addr); 1943 1944 iiq.msg_version_maj = 0x01; 1945 iiq.msg_version_min = 0x02; 1946 1947 iiq.hdr_version_unit = 0x0d; 1948 iiq.hdr_version_dev = 0x00; 1949 1950 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 1951 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n", 1952 DEVNAME(sc)); 1953 return (1); 1954 } 1955 1956 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 1957 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n", 1958 DEVNAME(sc)); 1959 return (1); 1960 } 1961 1962 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d " 1963 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 1964 iip.msg_length, iip.whoinit); 1965 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d " 1966 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags, 1967 iip.max_buses, iip.max_devices, iip.flags); 1968 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1969 letoh32(iip.msg_context)); 1970 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 1971 letoh16(iip.ioc_status)); 1972 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1973 letoh32(iip.ioc_loginfo)); 1974 1975 return (0); 1976 } 1977 1978 int 1979 mpi_portfacts(struct mpi_softc *sc) 1980 { 1981 struct mpi_ccb *ccb; 1982 struct mpi_msg_portfacts_request *pfq; 1983 volatile struct mpi_msg_portfacts_reply *pfp; 1984 int s, rv = 1; 1985 1986 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc)); 1987 1988 s = splbio(); 1989 ccb = mpi_get_ccb(sc); 1990 splx(s); 1991 if (ccb == NULL) { 1992 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n", 1993 DEVNAME(sc)); 1994 return (rv); 1995 } 1996 1997 ccb->ccb_done = mpi_empty_done; 1998 pfq = ccb->ccb_cmd; 1999 2000 pfq->function = MPI_FUNCTION_PORT_FACTS; 2001 pfq->chain_offset = 0; 2002 pfq->msg_flags = 0; 2003 pfq->port_number = 0; 2004 pfq->msg_context = htole32(ccb->ccb_id); 2005 2006 if (mpi_poll(sc, ccb, 50000) != 0) { 2007 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc)); 2008 goto err; 2009 } 2010 2011 if (ccb->ccb_rcb == NULL) { 2012 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n", 2013 DEVNAME(sc)); 2014 goto err; 2015 } 2016 pfp = ccb->ccb_rcb->rcb_reply; 2017 2018 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n", 2019 DEVNAME(sc), pfp->function, pfp->msg_length); 2020 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n", 2021 DEVNAME(sc), pfp->msg_flags, pfp->port_number); 2022 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2023 letoh32(pfp->msg_context)); 2024 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2025 letoh16(pfp->ioc_status)); 2026 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2027 letoh32(pfp->ioc_loginfo)); 2028 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n", 2029 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type); 2030 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n", 2031 DEVNAME(sc), letoh16(pfp->protocol_flags), 2032 letoh16(pfp->port_scsi_id)); 2033 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d " 2034 "max_posted_cmd_buffers: %d\n", DEVNAME(sc), 2035 letoh16(pfp->max_persistent_ids), 2036 letoh16(pfp->max_posted_cmd_buffers)); 2037 DNPRINTF(MPI_D_MISC, "%s: 
max_lan_buckets: %d\n", DEVNAME(sc), 2038 letoh16(pfp->max_lan_buckets)); 2039 2040 sc->sc_porttype = pfp->port_type; 2041 if (sc->sc_target == -1) 2042 sc->sc_target = letoh16(pfp->port_scsi_id); 2043 2044 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2045 rv = 0; 2046 err: 2047 mpi_put_ccb(sc, ccb); 2048 2049 return (rv); 2050 } 2051 2052 int 2053 mpi_cfg_coalescing(struct mpi_softc *sc) 2054 { 2055 struct mpi_cfg_hdr hdr; 2056 struct mpi_cfg_ioc_pg1 pg; 2057 u_int32_t flags; 2058 2059 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) { 2060 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n", 2061 DEVNAME(sc)); 2062 return (1); 2063 } 2064 2065 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) { 2066 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_coalescing unable to fetch " 2067 "IOC page 1\n", DEVNAME(sc)); 2068 return (1); 2069 } 2070 2071 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc)); 2072 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc), 2073 letoh32(pg.flags)); 2074 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc), 2075 letoh32(pg.coalescing_timeout)); 2076 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n", 2077 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num); 2078 2079 flags = letoh32(pg.flags); 2080 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)) 2081 return (0); 2082 2083 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING)); 2084 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) { 2085 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n", 2086 DEVNAME(sc)); 2087 return (1); 2088 } 2089 2090 return (0); 2091 } 2092 2093 int 2094 mpi_eventnotify(struct mpi_softc *sc) 2095 { 2096 struct mpi_ccb *ccb; 2097 struct mpi_msg_event_request *enq; 2098 int s; 2099 2100 s = splbio(); 2101 ccb = mpi_get_ccb(sc); 2102 splx(s); 2103 if (ccb == NULL) { 2104 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n", 2105 DEVNAME(sc)); 2106 return (1); 2107 } 2108 2109 ccb->ccb_done = mpi_eventnotify_done; 2110 enq = ccb->ccb_cmd; 2111 2112 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION; 2113 enq->chain_offset = 0; 2114 enq->event_switch = MPI_EVENT_SWITCH_ON; 2115 enq->msg_context = htole32(ccb->ccb_id); 2116 2117 mpi_start(sc, ccb); 2118 return (0); 2119 } 2120 2121 void 2122 mpi_eventnotify_done(struct mpi_ccb *ccb) 2123 { 2124 struct mpi_softc *sc = ccb->ccb_sc; 2125 struct mpi_msg_event_reply *enp = ccb->ccb_rcb->rcb_reply; 2126 2127 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc)); 2128 2129 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d " 2130 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length, 2131 letoh16(enp->data_length)); 2132 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags: 0x%02x\n", 2133 DEVNAME(sc), enp->ack_required, enp->msg_flags); 2134 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2135 letoh32(enp->msg_context)); 2136 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2137 letoh16(enp->ioc_status)); 2138 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2139 letoh32(enp->ioc_loginfo)); 2140 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc), 2141 letoh32(enp->event)); 2142 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc), 2143 letoh32(enp->event_context)); 2144 2145 switch (letoh32(enp->event)) { 2146 /* ignore these */ 2147 case MPI_EVENT_EVENT_CHANGE: 2148 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2149 break; 2150 2151 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2152 if (sc->sc_scsibus == NULL) 2153
break; 2154 2155 mpi_evt_sas(sc, ccb->ccb_rcb); 2156 break; 2157 2158 default: 2159 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n", 2160 DEVNAME(sc), letoh32(enp->event)); 2161 break; 2162 } 2163 2164 if (enp->ack_required) 2165 mpi_eventack(sc, enp); 2166 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2167 2168 #if 0 2169 /* fc hbas have a bad habit of setting this without meaning it. */ 2170 if ((enp->msg_flags & MPI_EVENT_FLAGS_REPLY_KEPT) == 0) { 2171 mpi_put_ccb(sc, ccb); 2172 } 2173 #endif 2174 } 2175 2176 void 2177 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb) 2178 { 2179 #if 0 2180 struct mpi_evt_sas_change *ch; 2181 u_int8_t *data; 2182 2183 data = rcb->rcb_reply; 2184 data += sizeof(struct mpi_msg_event_reply); 2185 ch = (struct mpi_evt_sas_change *)data; 2186 2187 if (ch->bus != 0) 2188 return; 2189 2190 switch (ch->reason) { 2191 case MPI_EVT_SASCH_REASON_ADDED: 2192 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED: 2193 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) { 2194 printf("%s: unable to request attach of %d\n", 2195 DEVNAME(sc), ch->target); 2196 } 2197 break; 2198 2199 case MPI_EVT_SASCH_REASON_NOT_RESPONDING: 2200 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE); 2201 if (scsi_req_detach(sc->sc_scsibus, ch->target, -1, 2202 DETACH_FORCE) != 0) { 2203 printf("%s: unable to request detach of %d\n", 2204 DEVNAME(sc), ch->target); 2205 } 2206 break; 2207 2208 case MPI_EVT_SASCH_REASON_SMART_DATA: 2209 case MPI_EVT_SASCH_REASON_UNSUPPORTED: 2210 case MPI_EVT_SASCH_REASON_INTERNAL_RESET: 2211 break; 2212 default: 2213 printf("%s: unknown reason for SAS device status change: " 2214 "0x%02x\n", DEVNAME(sc), ch->reason); 2215 break; 2216 } 2217 #endif 2218 } 2219 2220 void 2221 mpi_eventack(struct mpi_softc *sc, struct mpi_msg_event_reply *enp) 2222 { 2223 struct mpi_ccb *ccb; 2224 struct mpi_msg_eventack_request *eaq; 2225 2226 ccb = mpi_get_ccb(sc); 2227 if (ccb == NULL) { 2228 DNPRINTF(MPI_D_EVT, "%s: mpi_eventack ccb_get\n", DEVNAME(sc)); 2229 return; 2230 } 2231 2232 ccb->ccb_done = mpi_eventack_done; 2233 eaq = ccb->ccb_cmd; 2234 2235 eaq->function = MPI_FUNCTION_EVENT_ACK; 2236 eaq->msg_context = htole32(ccb->ccb_id); 2237 2238 eaq->event = enp->event; 2239 eaq->event_context = enp->event_context; 2240 2241 mpi_start(sc, ccb); 2242 return; 2243 } 2244 2245 void 2246 mpi_eventack_done(struct mpi_ccb *ccb) 2247 { 2248 struct mpi_softc *sc = ccb->ccb_sc; 2249 2250 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 2251 2252 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2253 mpi_put_ccb(sc, ccb); 2254 } 2255 2256 int 2257 mpi_portenable(struct mpi_softc *sc) 2258 { 2259 struct mpi_ccb *ccb; 2260 struct mpi_msg_portenable_request *peq; 2261 struct mpi_msg_portenable_repy *pep; 2262 int s; 2263 2264 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2265 2266 s = splbio(); 2267 ccb = mpi_get_ccb(sc); 2268 splx(s); 2269 if (ccb == NULL) { 2270 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2271 DEVNAME(sc)); 2272 return (1); 2273 } 2274 2275 ccb->ccb_done = mpi_empty_done; 2276 peq = ccb->ccb_cmd; 2277 2278 peq->function = MPI_FUNCTION_PORT_ENABLE; 2279 peq->port_number = 0; 2280 peq->msg_context = htole32(ccb->ccb_id); 2281 2282 if (mpi_poll(sc, ccb, 50000) != 0) { 2283 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2284 return (1); 2285 } 2286 2287 if (ccb->ccb_rcb == NULL) { 2288 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2289 DEVNAME(sc)); 2290 return (1); 2291 } 2292 pep = 
ccb->ccb_rcb->rcb_reply; 2293 2294 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2295 mpi_put_ccb(sc, ccb); 2296 2297 return (0); 2298 } 2299 2300 int 2301 mpi_fwupload(struct mpi_softc *sc) 2302 { 2303 struct mpi_ccb *ccb; 2304 struct { 2305 struct mpi_msg_fwupload_request req; 2306 struct mpi_sge sge; 2307 } __packed *bundle; 2308 struct mpi_msg_fwupload_reply *upp; 2309 u_int64_t addr; 2310 int s; 2311 int rv = 0; 2312 2313 if (sc->sc_fw_len == 0) 2314 return (0); 2315 2316 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2317 2318 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2319 if (sc->sc_fw == NULL) { 2320 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n", 2321 DEVNAME(sc), sc->sc_fw_len); 2322 return (1); 2323 } 2324 2325 s = splbio(); 2326 ccb = mpi_get_ccb(sc); 2327 splx(s); 2328 if (ccb == NULL) { 2329 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n", 2330 DEVNAME(sc)); 2331 goto err; 2332 } 2333 2334 ccb->ccb_done = mpi_empty_done; 2335 bundle = ccb->ccb_cmd; 2336 2337 bundle->req.function = MPI_FUNCTION_FW_UPLOAD; 2338 bundle->req.msg_context = htole32(ccb->ccb_id); 2339 2340 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW; 2341 2342 bundle->req.tce.details_length = 12; 2343 bundle->req.tce.image_size = htole32(sc->sc_fw_len); 2344 2345 bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2346 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 2347 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len); 2348 addr = MPI_DMA_DVA(sc->sc_fw); 2349 bundle->sge.sg_hi_addr = htole32((u_int32_t)(addr >> 32)); 2350 bundle->sge.sg_lo_addr = htole32((u_int32_t)addr); 2351 2352 if (mpi_poll(sc, ccb, 50000) != 0) { 2353 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc)); 2354 goto err; 2355 } 2356 2357 if (ccb->ccb_rcb == NULL) 2358 panic("%s: unable to do fw upload\n", DEVNAME(sc)); 2359 upp = ccb->ccb_rcb->rcb_reply; 2360 2361 if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2362 rv = 1; 2363 2364 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2365 mpi_put_ccb(sc, ccb); 2366 2367 return (rv); 2368 2369 err: 2370 mpi_dmamem_free(sc, sc->sc_fw); 2371 return (1); 2372 } 2373 2374 void 2375 mpi_get_raid(struct mpi_softc *sc) 2376 { 2377 struct mpi_cfg_hdr hdr; 2378 struct mpi_cfg_ioc_pg2 *vol_page; 2379 struct mpi_cfg_raid_vol *vol_list, *vol; 2380 size_t pagelen; 2381 u_int32_t capabilities; 2382 struct scsi_link *link; 2383 int i; 2384 2385 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc)); 2386 2387 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) { 2388 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header " 2389 "for IOC page 2\n", DEVNAME(sc)); 2390 return; 2391 } 2392 2393 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2394 vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2395 if (vol_page == NULL) { 2396 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate " 2397 "space for ioc config page 2\n", DEVNAME(sc)); 2398 return; 2399 } 2400 vol_list = (struct mpi_cfg_raid_vol *)(vol_page + 1); 2401 2402 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) { 2403 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC " 2404 "page 2\n", DEVNAME(sc)); 2405 goto out; 2406 } 2407 2408 capabilities = letoh32(vol_page->capabilities); 2409 2410 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x%08x\n", DEVNAME(sc), 2411 letoh32(vol_page->capabilities)); 2412 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d " 2413 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2414
vol_page->active_vols, vol_page->max_vols, 2415 vol_page->active_physdisks, vol_page->max_physdisks); 2416 2417 /* don't walk the list if there is no RAID capability */ 2418 if (capabilities == 0xdeadbeef) { 2419 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)); 2420 goto out; 2421 } 2422 2423 if ((capabilities & MPI_CFG_IOC_2_CAPABILITIES_RAID) == 0 || 2424 (vol_page->active_vols == 0)) 2425 goto out; 2426 2427 sc->sc_flags |= MPI_F_RAID; 2428 2429 for (i = 0; i < vol_page->active_vols; i++) { 2430 vol = &vol_list[i]; 2431 2432 DNPRINTF(MPI_D_RAID, "%s: id: %d bus: %d ioc: %d pg: %d\n", 2433 DEVNAME(sc), vol->vol_id, vol->vol_bus, vol->vol_ioc, 2434 vol->vol_page); 2435 DNPRINTF(MPI_D_RAID, "%s: type: 0x%02x flags: 0x%02x\n", 2436 DEVNAME(sc), vol->vol_type, vol->flags); 2437 2438 if (vol->vol_ioc != sc->sc_ioc_number || vol->vol_bus != 0) 2439 continue; 2440 2441 link = sc->sc_scsibus->sc_link[vol->vol_id][0]; 2442 if (link == NULL) 2443 continue; 2444 2445 link->flags |= SDEV_VIRTUAL; 2446 } 2447 2448 out: 2449 free(vol_page, M_TEMP); 2450 } 2451 2452 int 2453 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number, 2454 u_int32_t address, int flags, void *p) 2455 { 2456 struct mpi_ccb *ccb; 2457 struct mpi_msg_config_request *cq; 2458 struct mpi_msg_config_reply *cp; 2459 struct mpi_cfg_hdr *hdr = p; 2460 struct mpi_ecfg_hdr *ehdr = p; 2461 int etype = 0; 2462 int rv = 0; 2463 int s; 2464 2465 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x " 2466 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2467 address, flags, MPI_PG_FMT); 2468 2469 s = splbio(); 2470 ccb = mpi_get_ccb(sc); 2471 splx(s); 2472 if (ccb == NULL) { 2473 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n", 2474 DEVNAME(sc)); 2475 return (1); 2476 } 2477 2478 if (ISSET(flags, MPI_PG_EXTENDED)) { 2479 etype = type; 2480 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2481 } 2482 2483 cq = ccb->ccb_cmd; 2484 2485 cq->function = MPI_FUNCTION_CONFIG; 2486 cq->msg_context = htole32(ccb->ccb_id); 2487 2488 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER; 2489 2490 cq->config_header.page_number = number; 2491 cq->config_header.page_type = type; 2492 cq->ext_page_type = etype; 2493 cq->page_address = htole32(address); 2494 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2495 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 2496 2497 if (ISSET(flags, MPI_PG_POLL)) { 2498 ccb->ccb_done = mpi_empty_done; 2499 if (mpi_poll(sc, ccb, 50000) != 0) { 2500 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2501 DEVNAME(sc)); 2502 return (1); 2503 } 2504 } else { 2505 ccb->ccb_done = (void (*)(struct mpi_ccb *))wakeup; 2506 s = splbio(); 2507 mpi_start(sc, ccb); 2508 while (ccb->ccb_state != MPI_CCB_READY) 2509 tsleep(ccb, PRIBIO, "mpipghdr", 0); 2510 splx(s); 2511 } 2512 2513 if (ccb->ccb_rcb == NULL) 2514 panic("%s: unable to fetch config header\n", DEVNAME(sc)); 2515 cp = ccb->ccb_rcb->rcb_reply; 2516 2517 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2518 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2519 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2520 "msg_flags: 0x%02x\n", DEVNAME(sc), 2521 letoh16(cp->ext_page_length), cp->ext_page_type, 2522 cp->msg_flags); 2523 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2524 letoh32(cp->msg_context)); 2525 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2526 letoh16(cp->ioc_status)); 2527 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 
0x%08x\n", DEVNAME(sc), 2528 letoh32(cp->ioc_loginfo)); 2529 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2530 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2531 cp->config_header.page_version, 2532 cp->config_header.page_length, 2533 cp->config_header.page_number, 2534 cp->config_header.page_type); 2535 2536 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2537 rv = 1; 2538 else if (ISSET(flags, MPI_PG_EXTENDED)) { 2539 bzero(ehdr, sizeof(*ehdr)); 2540 ehdr->page_version = cp->config_header.page_version; 2541 ehdr->page_number = cp->config_header.page_number; 2542 ehdr->page_type = cp->config_header.page_type; 2543 ehdr->ext_page_length = cp->ext_page_length; 2544 ehdr->ext_page_type = cp->ext_page_type; 2545 } else 2546 *hdr = cp->config_header; 2547 2548 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2549 mpi_put_ccb(sc, ccb); 2550 2551 return (rv); 2552 } 2553 2554 int 2555 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags, 2556 void *p, int read, void *page, size_t len) 2557 { 2558 struct mpi_ccb *ccb; 2559 struct mpi_msg_config_request *cq; 2560 struct mpi_msg_config_reply *cp; 2561 struct mpi_cfg_hdr *hdr = p; 2562 struct mpi_ecfg_hdr *ehdr = p; 2563 u_int64_t dva; 2564 char *kva; 2565 int page_length; 2566 int rv = 0; 2567 int s; 2568 2569 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n", 2570 DEVNAME(sc), address, read, hdr->page_type); 2571 2572 page_length = ISSET(flags, MPI_PG_EXTENDED) ? 2573 letoh16(ehdr->ext_page_length) : hdr->page_length; 2574 2575 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) || 2576 len < page_length * 4) 2577 return (1); 2578 2579 s = splbio(); 2580 ccb = mpi_get_ccb(sc); 2581 splx(s); 2582 if (ccb == NULL) { 2583 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2584 return (1); 2585 } 2586 2587 cq = ccb->ccb_cmd; 2588 2589 cq->function = MPI_FUNCTION_CONFIG; 2590 cq->msg_context = htole32(ccb->ccb_id); 2591 2592 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2593 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2594 2595 if (ISSET(flags, MPI_PG_EXTENDED)) { 2596 cq->config_header.page_version = ehdr->page_version; 2597 cq->config_header.page_number = ehdr->page_number; 2598 cq->config_header.page_type = ehdr->page_type; 2599 cq->ext_page_len = ehdr->ext_page_length; 2600 cq->ext_page_type = ehdr->ext_page_type; 2601 } else 2602 cq->config_header = *hdr; 2603 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2604 cq->page_address = htole32(address); 2605 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2606 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2607 (page_length * 4) | 2608 (read ? 
MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2609 2610 /* bounce the page via the request space to avoid more bus_dma games */ 2611 dva = ccb->ccb_cmd_dva + sizeof(struct mpi_msg_config_request); 2612 2613 cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32)); 2614 cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva); 2615 2616 kva = ccb->ccb_cmd; 2617 kva += sizeof(struct mpi_msg_config_request); 2618 if (!read) 2619 bcopy(page, kva, len); 2620 2621 if (ISSET(flags, MPI_PG_POLL)) { 2622 ccb->ccb_done = mpi_empty_done; 2623 if (mpi_poll(sc, ccb, 50000) != 0) { 2624 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2625 DEVNAME(sc)); 2626 return (1); 2627 } 2628 } else { 2629 ccb->ccb_done = (void (*)(struct mpi_ccb *))wakeup; 2630 s = splbio(); 2631 mpi_start(sc, ccb); 2632 while (ccb->ccb_state != MPI_CCB_READY) 2633 tsleep(ccb, PRIBIO, "mpipghdr", 0); 2634 splx(s); 2635 } 2636 2637 if (ccb->ccb_rcb == NULL) { 2638 mpi_put_ccb(sc, ccb); 2639 return (1); 2640 } 2641 cp = ccb->ccb_rcb->rcb_reply; 2642 2643 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2644 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2645 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2646 "msg_flags: 0x%02x\n", DEVNAME(sc), 2647 letoh16(cp->ext_page_length), cp->ext_page_type, 2648 cp->msg_flags); 2649 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2650 letoh32(cp->msg_context)); 2651 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2652 letoh16(cp->ioc_status)); 2653 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2654 letoh32(cp->ioc_loginfo)); 2655 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2656 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2657 cp->config_header.page_version, 2658 cp->config_header.page_length, 2659 cp->config_header.page_number, 2660 cp->config_header.page_type); 2661 2662 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2663 rv = 1; 2664 else if (read) 2665 bcopy(kva, page, len); 2666 2667 mpi_push_reply(sc, ccb->ccb_rcb->rcb_reply_dva); 2668 mpi_put_ccb(sc, ccb); 2669 2670 return (rv); 2671 } 2672 2673 int 2674 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag, 2675 struct proc *p) 2676 { 2677 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2678 2679 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc)); 2680 2681 if (sc->sc_ioctl) 2682 return (sc->sc_ioctl(link->adapter_softc, cmd, addr)); 2683 else 2684 return (ENOTTY); 2685 } 2686 2687 #if NBIO > 0 2688 int 2689 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id) 2690 { 2691 int len, rv = EINVAL; 2692 u_int32_t address; 2693 struct mpi_cfg_hdr hdr; 2694 struct mpi_cfg_raid_vol_pg0 *rpg0; 2695 2696 /* get IOC page 2 */ 2697 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 2698 sc->sc_cfg_hdr.page_length * 4) != 0) { 2699 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to " 2700 "fetch IOC page 2\n", DEVNAME(sc)); 2701 goto done; 2702 } 2703 2704 /* XXX return something else than EINVAL to indicate within hs range */ 2705 if (id > sc->sc_vol_page->active_vols) { 2706 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol " 2707 "id: %d\n", DEVNAME(sc), id); 2708 goto done; 2709 } 2710 2711 /* replace current buffer with new one */ 2712 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks * 2713 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 2714 rpg0 = malloc(len, M_TEMP, M_WAITOK | M_CANFAIL); 2715 if (rpg0 == NULL) { 2716 
printf("%s: can't get memory for RAID page 0, " 2717 "bio disabled\n", DEVNAME(sc)); 2718 goto done; 2719 } 2720 if (sc->sc_rpg0) 2721 free(sc->sc_rpg0, M_DEVBUF); 2722 sc->sc_rpg0 = rpg0; 2723 2724 /* get raid vol page 0 */ 2725 address = sc->sc_vol_list[id].vol_id | 2726 (sc->sc_vol_list[id].vol_bus << 8); 2727 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 2728 address, &hdr) != 0) 2729 goto done; 2730 if (mpi_cfg_page(sc, address, &hdr, 1, rpg0, len)) { 2731 printf("%s: can't get RAID vol cfg page 0\n", DEVNAME(sc)); 2732 goto done; 2733 } 2734 2735 rv = 0; 2736 done: 2737 return (rv); 2738 } 2739 2740 int 2741 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr) 2742 { 2743 struct mpi_softc *sc = (struct mpi_softc *)dev; 2744 int error = 0; 2745 2746 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc)); 2747 2748 /* make sure we have bio enabled */ 2749 if (sc->sc_ioctl != mpi_ioctl) 2750 return (EINVAL); 2751 2752 rw_enter_write(&sc->sc_lock); 2753 2754 switch (cmd) { 2755 case BIOCINQ: 2756 DNPRINTF(MPI_D_IOCTL, "inq\n"); 2757 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr); 2758 break; 2759 2760 case BIOCVOL: 2761 DNPRINTF(MPI_D_IOCTL, "vol\n"); 2762 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr); 2763 break; 2764 2765 case BIOCDISK: 2766 DNPRINTF(MPI_D_IOCTL, "disk\n"); 2767 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr); 2768 break; 2769 2770 case BIOCALARM: 2771 DNPRINTF(MPI_D_IOCTL, "alarm\n"); 2772 break; 2773 2774 case BIOCBLINK: 2775 DNPRINTF(MPI_D_IOCTL, "blink\n"); 2776 break; 2777 2778 case BIOCSETSTATE: 2779 DNPRINTF(MPI_D_IOCTL, "setstate\n"); 2780 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr); 2781 break; 2782 2783 default: 2784 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n"); 2785 error = EINVAL; 2786 } 2787 2788 rw_exit_write(&sc->sc_lock); 2789 2790 return (error); 2791 } 2792 2793 int 2794 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi) 2795 { 2796 if (!(sc->sc_flags & MPI_F_RAID)) { 2797 bi->bi_novol = 0; 2798 bi->bi_nodisk = 0; 2799 } 2800 2801 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 2802 sc->sc_cfg_hdr.page_length * 4) != 0) { 2803 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC " 2804 "page 2\n", DEVNAME(sc)); 2805 return (EINVAL); 2806 } 2807 2808 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d " 2809 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2810 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols, 2811 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks); 2812 2813 bi->bi_novol = sc->sc_vol_page->active_vols; 2814 bi->bi_nodisk = sc->sc_vol_page->active_physdisks; 2815 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 2816 2817 return (0); 2818 } 2819 2820 int 2821 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv) 2822 { 2823 int i, vol, id, rv = EINVAL; 2824 struct device *dev; 2825 struct scsi_link *link; 2826 struct mpi_cfg_raid_vol_pg0 *rpg0; 2827 2828 id = bv->bv_volid; 2829 if (mpi_bio_get_pg0_raid(sc, id)) 2830 goto done; 2831 2832 if (id > sc->sc_vol_page->active_vols) 2833 return (EINVAL); /* XXX deal with hot spares */ 2834 2835 rpg0 = sc->sc_rpg0; 2836 if (rpg0 == NULL) 2837 goto done; 2838 2839 /* determine status */ 2840 switch (rpg0->volume_state) { 2841 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 2842 bv->bv_status = BIOC_SVONLINE; 2843 break; 2844 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 2845 bv->bv_status = BIOC_SVDEGRADED; 2846 break; 2847 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 2848 case 
MPI_CFG_RAID_VOL_0_STATE_MISSING: 2849 bv->bv_status = BIOC_SVOFFLINE; 2850 break; 2851 default: 2852 bv->bv_status = BIOC_SVINVALID; 2853 } 2854 2855 /* override status if scrubbing or something */ 2856 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) 2857 bv->bv_status = BIOC_SVREBUILD; 2858 2859 bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512; 2860 2861 switch (sc->sc_vol_list[id].vol_type) { 2862 case MPI_CFG_RAID_TYPE_RAID_IS: 2863 bv->bv_level = 0; 2864 break; 2865 case MPI_CFG_RAID_TYPE_RAID_IME: 2866 case MPI_CFG_RAID_TYPE_RAID_IM: 2867 bv->bv_level = 1; 2868 break; 2869 case MPI_CFG_RAID_TYPE_RAID_5: 2870 bv->bv_level = 5; 2871 break; 2872 case MPI_CFG_RAID_TYPE_RAID_6: 2873 bv->bv_level = 6; 2874 break; 2875 case MPI_CFG_RAID_TYPE_RAID_10: 2876 bv->bv_level = 10; 2877 break; 2878 case MPI_CFG_RAID_TYPE_RAID_50: 2879 bv->bv_level = 50; 2880 break; 2881 default: 2882 bv->bv_level = -1; 2883 } 2884 2885 bv->bv_nodisk = rpg0->num_phys_disks; 2886 2887 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) { 2888 link = sc->sc_scsibus->sc_link[i][0]; 2889 if (link == NULL) 2890 continue; 2891 2892 /* skip if not a virtual disk */ 2893 if (!(link->flags & SDEV_VIRTUAL)) 2894 continue; 2895 2896 vol++; 2897 /* are we it? */ 2898 if (vol == bv->bv_volid) { 2899 dev = link->device_softc; 2900 memcpy(bv->bv_vendor, link->inqdata.vendor, 2901 sizeof bv->bv_vendor); 2902 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0'; 2903 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev); 2904 break; 2905 } 2906 } 2907 rv = 0; 2908 done: 2909 return (rv); 2910 } 2911 2912 int 2913 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd) 2914 { 2915 int pdid, id, rv = EINVAL; 2916 u_int32_t address; 2917 struct mpi_cfg_hdr hdr; 2918 struct mpi_cfg_raid_vol_pg0 *rpg0; 2919 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk; 2920 struct mpi_cfg_raid_physdisk_pg0 pdpg0; 2921 2922 id = bd->bd_volid; 2923 if (mpi_bio_get_pg0_raid(sc, id)) 2924 goto done; 2925 2926 if (id > sc->sc_vol_page->active_vols) 2927 return (EINVAL); /* XXX deal with hot spares */ 2928 2929 rpg0 = sc->sc_rpg0; 2930 if (rpg0 == NULL) 2931 goto done; 2932 2933 pdid = bd->bd_diskid; 2934 if (pdid > rpg0->num_phys_disks) 2935 goto done; 2936 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1); 2937 physdisk += pdid; 2938 2939 /* get raid phys disk page 0 */ 2940 address = physdisk->phys_disk_num; 2941 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address, 2942 &hdr) != 0) 2943 goto done; 2944 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) { 2945 bd->bd_status = BIOC_SDFAILED; 2946 return (0); 2947 } 2948 bd->bd_channel = pdpg0.phys_disk_bus; 2949 bd->bd_target = pdpg0.phys_disk_id; 2950 bd->bd_lun = 0; 2951 bd->bd_size = (u_quad_t)pdpg0.max_lba * 512; 2952 strlcpy(bd->bd_vendor, pdpg0.vendor_id, sizeof(bd->bd_vendor)); 2953 2954 switch (pdpg0.phys_disk_state) { 2955 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE: 2956 bd->bd_status = BIOC_SDONLINE; 2957 break; 2958 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING: 2959 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED: 2960 bd->bd_status = BIOC_SDFAILED; 2961 break; 2962 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL: 2963 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER: 2964 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE: 2965 bd->bd_status = BIOC_SDOFFLINE; 2966 break; 2967 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT: 2968 bd->bd_status = BIOC_SDSCRUB; 2969 break; 2970 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT: 2971 default: 2972 bd->bd_status = BIOC_SDINVALID; 2973 break; 2974 } 
2975 2976 /* XXX figure this out */ 2977 /* bd_serial[32]; */ 2978 /* bd_procdev[16]; */ 2979 2980 rv = 0; 2981 done: 2982 return (rv); 2983 } 2984 2985 int 2986 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs) 2987 { 2988 return (ENOTTY); 2989 } 2990 2991 #ifndef SMALL_KERNEL 2992 int 2993 mpi_create_sensors(struct mpi_softc *sc) 2994 { 2995 struct device *dev; 2996 struct scsi_link *link; 2997 int i, vol; 2998 2999 /* count volumes */ 3000 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3001 link = sc->sc_scsibus->sc_link[i][0]; 3002 if (link == NULL) 3003 continue; 3004 /* skip if not a virtual disk */ 3005 if (!(link->flags & SDEV_VIRTUAL)) 3006 continue; 3007 3008 vol++; 3009 } 3010 3011 sc->sc_sensors = malloc(sizeof(struct ksensor) * vol, 3012 M_DEVBUF, M_WAITOK|M_ZERO); 3013 if (sc->sc_sensors == NULL) 3014 return (1); 3015 3016 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3017 sizeof(sc->sc_sensordev.xname)); 3018 3019 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) { 3020 link = sc->sc_scsibus->sc_link[i][0]; 3021 if (link == NULL) 3022 continue; 3023 /* skip if not a virtual disk */ 3024 if (!(link->flags & SDEV_VIRTUAL)) 3025 continue; 3026 3027 dev = link->device_softc; 3028 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname, 3029 sizeof(sc->sc_sensors[vol].desc)); 3030 sc->sc_sensors[vol].type = SENSOR_DRIVE; 3031 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3032 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]); 3033 3034 vol++; 3035 } 3036 3037 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL) 3038 goto bad; 3039 3040 sensordev_install(&sc->sc_sensordev); 3041 3042 return (0); 3043 3044 bad: 3045 free(sc->sc_sensors, M_DEVBUF); 3046 return (1); 3047 } 3048 3049 void 3050 mpi_refresh_sensors(void *arg) 3051 { 3052 int i, vol; 3053 struct scsi_link *link; 3054 struct mpi_softc *sc = arg; 3055 struct mpi_cfg_raid_vol_pg0 *rpg0; 3056 3057 rw_enter_write(&sc->sc_lock); 3058 3059 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3060 link = sc->sc_scsibus->sc_link[i][0]; 3061 if (link == NULL) 3062 continue; 3063 /* skip if not a virtual disk */ 3064 if (!(link->flags & SDEV_VIRTUAL)) 3065 continue; 3066 3067 if (mpi_bio_get_pg0_raid(sc, vol)) 3068 continue; 3069 3070 rpg0 = sc->sc_rpg0; 3071 if (rpg0 == NULL) 3072 goto done; 3073 3074 /* determine status */ 3075 switch (rpg0->volume_state) { 3076 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3077 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE; 3078 sc->sc_sensors[vol].status = SENSOR_S_OK; 3079 break; 3080 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3081 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL; 3082 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3083 break; 3084 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3085 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3086 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL; 3087 sc->sc_sensors[vol].status = SENSOR_S_CRIT; 3088 break; 3089 default: 3090 sc->sc_sensors[vol].value = 0; /* unknown */ 3091 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3092 } 3093 3094 /* override status if scrubbing or something */ 3095 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) { 3096 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD; 3097 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3098 } 3099 3100 vol++; 3101 } 3102 done: 3103 rw_exit_write(&sc->sc_lock); 3104 } 3105 #endif /* SMALL_KERNEL */ 3106 #endif /* NBIO > 0 */ 3107
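/*
 * A worked example (illustrative only) of the scatter/gather sizing done in
 * mpi_iocfacts() above.  The structure sizes and the MPI_MAX_SGL value used
 * below are assumed example numbers, not the real definitions from
 * mpireg.h/mpivar.h; the driver always computes with the firmware-reported
 * request_frame_size and the actual structure sizes.  Suppose the IOC
 * reports request_frame_size = 32 dwords (128 bytes), and assume
 * sizeof(struct mpi_msg_scsi_io) were 48, sizeof(struct mpi_sge) were 12 and
 * MPI_MAX_SGL were 32.  Then, following the formulas in mpi_iocfacts():
 *
 *	sc_first_sgl_len = (128 - 48) / 12          = 6
 *	    (sg elements that fit in the request frame after the io cmd)
 *	sc_chain_len     = 128 / 12                 = 10
 *	    (sg elements that fit in one chain frame)
 *	sc_max_sgl_len   = (32 - 1) - (32 - 6) / 10 = 29
 *	    (the inline sgl loses one entry to the chain element, and each
 *	    chain frame loses one more)
 */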