/*	$OpenBSD: mpi.c,v 1.175 2012/01/16 10:55:46 dlg Exp $ */

/*
 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/dkio.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/biovar.h>
#include <dev/ic/mpireg.h>
#include <dev/ic/mpivar.h>

#ifdef MPI_DEBUG
uint32_t	mpi_debug = 0
/*		    | MPI_D_CMD */
/*		    | MPI_D_INTR */
/*		    | MPI_D_MISC */
/*		    | MPI_D_DMA */
/*		    | MPI_D_IOCTL */
/*		    | MPI_D_RW */
/*		    | MPI_D_MEM */
/*		    | MPI_D_CCB */
/*		    | MPI_D_PPR */
/*		    | MPI_D_RAID */
/*		    | MPI_D_EVT */
		;
#endif

struct cfdriver mpi_cd = {
	NULL,
	"mpi",
	DV_DULL
};

void			mpi_scsi_cmd(struct scsi_xfer *);
void			mpi_scsi_cmd_done(struct mpi_ccb *);
void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
int			mpi_scsi_probe(struct scsi_link *);
int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
			    int);

struct scsi_adapter mpi_switch = {
	mpi_scsi_cmd,
	mpi_minphys,
	mpi_scsi_probe,
	NULL,
	mpi_scsi_ioctl
};

struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
void			mpi_dmamem_free(struct mpi_softc *,
			    struct mpi_dmamem *);
int			mpi_alloc_ccbs(struct mpi_softc *);
void			*mpi_get_ccb(void *);
void			mpi_put_ccb(void *, void *);
int			mpi_alloc_replies(struct mpi_softc *);
void			mpi_push_replies(struct mpi_softc *);
void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);

void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
void			mpi_poll_done(struct mpi_ccb *);
void			mpi_reply(struct mpi_softc *, u_int32_t);

void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
void			mpi_wait_done(struct mpi_ccb *);

int			mpi_cfg_spi_port(struct mpi_softc *);
void			mpi_squash_ppr(struct mpi_softc *);
void			mpi_run_ppr(struct mpi_softc *);
int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
			    struct mpi_cfg_raid_physdisk *, int, int, int);
int			mpi_inq(struct mpi_softc *, u_int16_t, int);

int			mpi_cfg_sas(struct mpi_softc *);
int			mpi_cfg_fc(struct mpi_softc *);

void			mpi_timeout_xs(void *);
int			mpi_load_xs(struct mpi_ccb *);

u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);
int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);

int			mpi_init(struct mpi_softc *);
int			mpi_reset_soft(struct mpi_softc *);
int			mpi_reset_hard(struct mpi_softc *);

int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
int			mpi_handshake_recv_dword(struct mpi_softc *,
			    u_int32_t *);
int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);

void			mpi_empty_done(struct mpi_ccb *);

int			mpi_iocinit(struct mpi_softc *);
int			mpi_iocfacts(struct mpi_softc *);
int			mpi_portfacts(struct mpi_softc *);
int			mpi_portenable(struct mpi_softc *);
int			mpi_cfg_coalescing(struct mpi_softc *);
void			mpi_get_raid(struct mpi_softc *);
int			mpi_fwupload(struct mpi_softc *);
int			mpi_scsi_probe_virtual(struct scsi_link *);

int			mpi_eventnotify(struct mpi_softc *);
void			mpi_eventnotify_done(struct mpi_ccb *);
void			mpi_eventnotify_free(struct mpi_softc *,
			    struct mpi_rcb *);
void			mpi_eventack(void *, void *);
void			mpi_eventack_done(struct mpi_ccb *);
int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
void			mpi_evt_sas_detach(void *, void *);
void			mpi_evt_sas_detach_done(struct mpi_ccb *);
void			mpi_evt_fc_rescan(struct mpi_softc *);
void			mpi_fc_rescan(void *, void *);

int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
			    u_int8_t, u_int32_t, int, void *);
int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
			    void *, int, void *, size_t);

int			mpi_ioctl_cache(struct scsi_link *, u_long,
			    struct dk_cache *);

#if NBIO > 0
int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
int		mpi_ioctl(struct device *, u_long, caddr_t);
int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
#ifndef SMALL_KERNEL
int		mpi_create_sensors(struct mpi_softc *);
void		mpi_refresh_sensors(void *);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
#define mpi_push_reply_db(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))

#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

#define MPI_PG_EXTENDED		(1<<0)
#define MPI_PG_POLL		(1<<1)
#define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

#define mpi_cfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL, (_h))
#define mpi_ecfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))

#define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
	    (_h), (_r), (_p), (_l))
#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
	    (_h), (_r), (_p), (_l))

int
mpi_attach(struct mpi_softc *sc)
{
	struct scsibus_attach_args saa;
	struct mpi_ccb *ccb;

	printf("\n");

	rw_init(&sc->sc_lock, "mpi_lock");
	mtx_init(&sc->sc_evt_rescan_mtx, IPL_BIO);

	/* disable interrupts */
	mpi_write(sc, MPI_INTR_MASK,
	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);

	if (mpi_init(sc) != 0) {
		printf("%s: unable to initialise\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_alloc_ccbs(sc) != 0) {
		/* error already printed */
		return (1);
	}

	if (mpi_alloc_replies(sc) != 0) {
		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpi_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* spin until we're operational */
	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_ccbs;
	}

	mpi_push_replies(sc);

	if (mpi_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_cfg_coalescing(sc) != 0) {
		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
		goto free_replies;
	}

	switch (sc->sc_porttype) {
	case MPI_PORTFACTS_PORTTYPE_SAS:
		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
		    mpi_evt_sas_detach, sc);
		/* FALLTHROUGH */
	case MPI_PORTFACTS_PORTTYPE_FC:
		if (mpi_eventnotify(sc) != 0) {
			printf("%s: unable to enable events\n", DEVNAME(sc));
			goto free_replies;
		}
		break;
	}

	if (mpi_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_fwupload(sc) != 0) {
		printf("%s: unable to upload firmware\n", DEVNAME(sc));
		goto free_replies;
	}

	switch (sc->sc_porttype) {
	case MPI_PORTFACTS_PORTTYPE_SCSI:
		if (mpi_cfg_spi_port(sc) != 0)
			goto free_replies;
		mpi_squash_ppr(sc);
		break;
	case MPI_PORTFACTS_PORTTYPE_SAS:
		if (mpi_cfg_sas(sc) != 0)
			goto free_replies;
		break;
	case MPI_PORTFACTS_PORTTYPE_FC:
		if (mpi_cfg_fc(sc) != 0)
			goto free_replies;
		break;
	}

	/* get raid pages */
	mpi_get_raid(sc);
#if NBIO > 0
	if (sc->sc_flags & MPI_F_RAID) {
		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
			panic("%s: controller registration failed",
			    DEVNAME(sc));
		else {
			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
			    2, 0, &sc->sc_cfg_hdr) != 0) {
				panic("%s: can't get IOC page 2 hdr",
				    DEVNAME(sc));
			}

			sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4,
			    M_TEMP, M_WAITOK | M_CANFAIL);
			if (sc->sc_vol_page == NULL) {
				panic("%s: can't get memory for IOC page 2, "
				    "bio disabled", DEVNAME(sc));
			}

			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
			    sc->sc_vol_page,
			    sc->sc_cfg_hdr.page_length * 4) != 0) {
				panic("%s: can't get IOC page 2", DEVNAME(sc));
			}

			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
			    (sc->sc_vol_page + 1);

			sc->sc_ioctl = mpi_ioctl;
		}
	}
#endif /* NBIO > 0 */

	/* we should be good to go now, attach scsibus */
	sc->sc_link.adapter = &mpi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->sc_target;
	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;
	sc->sc_link.pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/* config_found() returns the scsibus attached to us */
	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* do domain validation */
	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_run_ppr(sc);

	/* enable interrupts */
	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

#if NBIO > 0
#ifndef SMALL_KERNEL
	mpi_create_sensors(sc);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

	return (0);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
	mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpi_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}

int
mpi_cfg_spi_port(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_spi_port_pg1 port;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
	    &hdr) != 0)
		return (1);

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
		return (1);

	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
	DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
	DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
	    letoh32(port.on_bus_timer_value));
	DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
	    DEVNAME(sc), port.target_config, letoh16(port.id_config));

	if (port.port_scsi_id == sc->sc_target &&
	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
	    port.on_bus_timer_value != htole32(0x0))
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
	    sc->sc_target);
	port.port_scsi_id = sc->sc_target;
	port.port_resp_ids = htole16(1 << sc->sc_target);
	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */

	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

void
mpi_squash_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_spi_dev_pg1 page;
	int i;

	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

	for (i = 0; i < sc->sc_buswidth; i++) {
		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
		    1, i, &hdr) != 0)
			return;

		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
			return;

		DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
		    "req_offset: 0x%02x req_period: 0x%02x "
		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
		    page.req_params1, page.req_offset, page.req_period,
		    page.req_params2, letoh32(page.configuration));

		page.req_params1 = 0x0;
		page.req_offset = 0x0;
		page.req_period = 0x0;
		page.req_params2 = 0x0;
		page.configuration = htole32(0x0);

		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
			return;
	}
}
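
/*
 * Domain validation: negotiate transfer parameters with each parallel SCSI
 * target by writing SPI device page 1, issuing an INQUIRY, then checking the
 * negotiated settings in page 0.  mpi_ppr() is retried at successively
 * slower rates (U320, U160, U80) until the target accepts.
 */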
void
mpi_run_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_spi_port_pg0 port_pg;
	struct mpi_cfg_ioc_pg3 *physdisk_pg;
	struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk;
	size_t pagelen;
	struct scsi_link *link;
	int i, tries;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
		    DEVNAME(sc));
		return;
	}

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
		    DEVNAME(sc));
		return;
	}

	for (i = 0; i < sc->sc_buswidth; i++) {
		link = scsi_get_link(sc->sc_scsibus, i, 0);
		if (link == NULL)
			continue;

		/* do not ppr volumes */
		if (link->flags & SDEV_VIRTUAL)
			continue;

		tries = 0;
		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

	if ((sc->sc_flags & MPI_F_RAID) == 0)
		return;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
	    &hdr) != 0) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc pg 3 header\n", DEVNAME(sc));
		return;
	}

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
	if (physdisk_pg == NULL) {
		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "allocate ioc pg 3\n", DEVNAME(sc));
		return;
	}
	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);

	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
		DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to "
		    "fetch ioc page 3\n", DEVNAME(sc));
		goto out;
	}

	DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
	    physdisk_pg->no_phys_disks);

	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
		physdisk = &physdisk_list[i];

		DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
		    physdisk->phys_disk_num);

		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
			continue;

		tries = 0;
		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
		    port_pg.max_offset, tries) == EAGAIN)
			tries++;
	}

out:
	free(physdisk_pg, M_TEMP);
}

int
mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
    struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
{
	struct mpi_cfg_hdr hdr0, hdr1;
	struct mpi_cfg_spi_dev_pg0 pg0;
	struct mpi_cfg_spi_dev_pg1 pg1;
	u_int32_t address;
	int id;
	int raid = 0;

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
	    link->quirks);

	if (try >= 3)
		return (EIO);

	if (physdisk == NULL) {
		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
			return (EIO);

		address = link->target;
		id = link->target;
	} else {
		raid = 1;
		address = (physdisk->phys_disk_bus << 8) |
		    (physdisk->phys_disk_id);
		id = physdisk->phys_disk_num;
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
	    address, &hdr0) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
	    address, &hdr1) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

#ifdef MPI_DEBUG
	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
#endif

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	pg1.req_params1 = 0;
	pg1.req_offset = offset;
	pg1.req_period = period;
	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;

	if (raid || !(link->quirks & SDEV_NOSYNC)) {
		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;

		switch (try) {
		case 0: /* U320 */
			break;
		case 1: /* U160 */
			pg1.req_period = 0x09;
			break;
		case 2: /* U80 */
			pg1.req_period = 0x0a;
			break;
		}

		if (pg1.req_period < 0x09) {
			/* Ultra320: enable QAS & PACKETIZED */
			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
		}
		if (pg1.req_period < 0xa) {
			/* >= Ultra160: enable dual xfers */
			pg1.req_params1 |=
			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
		}
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
		    DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));

	if (mpi_inq(sc, id, raid) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
		    "target %d\n", DEVNAME(sc), link->target);
		return (EIO);
	}

	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
		    "inquiry\n", DEVNAME(sc));
		return (EIO);
	}

	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));

	if (!(letoh32(pg0.information) & 0x07) && (try == 0)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
		    DEVNAME(sc));
		return (EAGAIN);
	}

	if (letoh32(pg0.information) & 0x0e) {
		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
		    DEVNAME(sc), letoh32(pg0.information));
		return (EAGAIN);
	}

	switch (pg0.neg_period) {
	case 0x08:
		period = 160;
		break;
	case 0x09:
		period = 80;
		break;
	case 0x0a:
		period = 40;
		break;
	case 0x0b:
		period = 20;
		break;
	case 0x0c:
		period = 10;
		break;
	default:
		period = 0;
		break;
	}

	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
	    id, period ? "Sync" : "Async", period,
	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
	    pg0.neg_offset,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);

	return (0);
}

int
mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
{
	struct mpi_ccb *ccb;
	struct scsi_inquiry inq;
	struct {
		struct mpi_msg_scsi_io io;
		struct mpi_sge sge;
		struct scsi_inquiry_data inqbuf;
		struct scsi_sense_data sense;
	} __packed *bundle;
	struct mpi_msg_scsi_io *io;
	struct mpi_sge *sge;
	u_int64_t addr;

	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));

	bzero(&inq, sizeof(inq));
	inq.opcode = INQUIRY;
	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL)
		return (1);

	ccb->ccb_done = mpi_empty_done;

	bundle = ccb->ccb_cmd;
	io = &bundle->io;
	sge = &bundle->sge;

	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
	    MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = target;

	io->cdb_length = sizeof(inq);
	io->sense_buf_len = sizeof(struct scsi_sense_data);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	/*
	 * always lun 0
	 * io->lun[0] = htobe16(link->lun);
	 */

	io->direction = MPI_SCSIIO_DIR_READ;
	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;

	bcopy(&inq, io->cdb, sizeof(inq));

	io->data_length = htole32(sizeof(struct scsi_inquiry_data));

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle));

	sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
	    (u_int32_t)sizeof(inq));

	addr = ccb->ccb_cmd_dva +
	    ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle);
	sge->sg_hi_addr = htole32((u_int32_t)(addr >> 32));
	sge->sg_lo_addr = htole32((u_int32_t)addr);

	if (mpi_poll(sc, ccb, 5000) != 0)
		return (1);

	if (ccb->ccb_rcb != NULL)
		mpi_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);

	return (0);
}

int
mpi_cfg_sas(struct mpi_softc *sc)
{
	struct mpi_ecfg_hdr ehdr;
	struct mpi_cfg_sas_iou_pg1 *pg;
	size_t pagelen;
	int rv = 0;

	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
	    &ehdr) != 0)
		return (EIO);

	pagelen = letoh16(ehdr.ext_page_length) * 4;
	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (pg == NULL)
		return (ENOMEM);

	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0) {
		rv = EIO;
		goto out;
	}

	if (pg->max_sata_q_depth != 32) {
		pg->max_sata_q_depth = 32;

		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0) {
			rv = EIO;
			goto out;
		}
	}

out:
	free(pg, M_TEMP);
	return (rv);
}

int
mpi_cfg_fc(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_fc_port_pg0 pg0;
	struct mpi_cfg_fc_port_pg1 pg1;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
	sc->sc_link.node_wwn = letoh64(pg0.wwnn);

	/* configure port config more to our liking */
	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
	    &hdr) != 0) {
		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));

	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

void
mpi_detach(struct mpi_softc *sc)
{

}

int
mpi_intr(void *arg)
{
	struct mpi_softc *sc = arg;
	u_int32_t reg;
	int rv = 0;

	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
		return (rv);

	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
		mpi_reply(sc, reg);
		rv = 1;
	}

	return (rv);
}
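
/*
 * A posted reply is either a "context" reply, where the register value
 * itself carries the message context of the completed request, or an
 * "address" reply, where it carries the bus address of a reply frame in
 * the host-resident reply buffer.  Decode it and complete the ccb.
 */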
void
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb *ccb;
	struct mpi_rcb *rcb = NULL;
	struct mpi_msg_reply *reply = NULL;
	u_int32_t reply_dva;
	int id;
	int i;

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);

	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
		    MPI_REPLY_SIZE;
		rcb = &sc->sc_rcbs[i];

		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);

		reply = rcb->rcb_reply;

		id = letoh32(reply->msg_context);
	} else {
		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
		case MPI_REPLY_QUEUE_TYPE_INIT:
			id = reg & MPI_REPLY_QUEUE_CONTEXT;
			break;

		default:
			panic("%s: unsupported context reply",
			    DEVNAME(sc));
		}
	}

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
	    DEVNAME(sc), id, reply);

	ccb = &sc->sc_ccbs[id];

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ccb->ccb_state = MPI_CCB_READY;
	ccb->ccb_rcb = rcb;

	ccb->ccb_done(ccb);
}

struct mpi_dmamem *
mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
{
	struct mpi_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
	    DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF);

	return (NULL);
}

void
mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
{
	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF);
}

int
mpi_alloc_ccbs(struct mpi_softc *sc)
{
	struct mpi_ccb *ccb;
	u_int8_t *cmd;
	int i;

	SLIST_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);

	sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds,
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpi_dmamem_alloc(sc,
	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPI_DMA_KVA(sc->sc_requests);
	bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds);

	for (i = 0; i < sc->sc_maxcmds; i++) {
		ccb = &sc->sc_ccbs[i];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
		    sc->sc_max_sgl_len, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		ccb->ccb_id = i;
		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
		ccb->ccb_state = MPI_CCB_READY;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpi_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);

	return (0);

free_maps:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpi_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF);

	return (1);
}

void *
mpi_get_ccb(void *xsc)
{
	struct mpi_softc *sc = xsc;
	struct mpi_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPI_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_mtx);

	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}

void
mpi_put_ccb(void *xsc, void *io)
{
	struct mpi_softc *sc = xsc;
	struct mpi_ccb *ccb = io;

	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);

#ifdef DIAGNOSTIC
	if (ccb->ccb_state == MPI_CCB_FREE)
		panic("mpi_put_ccb: double free");
#endif

	ccb->ccb_state = MPI_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE);
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}

int
mpi_alloc_replies(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));

	sc->sc_rcbs = malloc(sc->sc_repq * sizeof(struct mpi_rcb), M_DEVBUF,
	    M_WAITOK|M_CANFAIL);
	if (sc->sc_rcbs == NULL)
		return (1);

	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
	if (sc->sc_replies == NULL) {
		free(sc->sc_rcbs, M_DEVBUF);
		return (1);
	}

	return (0);
}

void
mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
{
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
}

void
mpi_push_replies(struct mpi_softc *sc)
{
	struct mpi_rcb *rcb;
	char *kva = MPI_DMA_KVA(sc->sc_replies);
	int i;

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_repq; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
		rcb->rcb_offset = MPI_REPLY_SIZE * i;
		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
		    MPI_REPLY_SIZE * i;
		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
	}
}
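
/*
 * Requests are posted to the IOC by writing the bus address of the ccb's
 * request frame to the request queue register; completion comes back later
 * through the reply queue and mpi_reply().
 */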
void
mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPI_CCB_QUEUED;
	mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
}

int
mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
{
	void (*done)(struct mpi_ccb *);
	void *cookie;
	int rv = 1;
	u_int32_t reg;

	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
	    timeout);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpi_poll_done;
	ccb->ccb_cookie = &rv;

	mpi_start(sc, ccb);
	while (rv == 1) {
		reg = mpi_pop_reply(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0) {
				printf("%s: timeout\n", DEVNAME(sc));
				goto timeout;
			}

			delay(1000);
			continue;
		}

		mpi_reply(sc, reg);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

timeout:
	return (rv);
}

void
mpi_poll_done(struct mpi_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}

void
mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mutex cookie = MUTEX_INITIALIZER(IPL_BIO);
	void (*done)(struct mpi_ccb *);

	done = ccb->ccb_done;
	ccb->ccb_done = mpi_wait_done;
	ccb->ccb_cookie = &cookie;

	/* XXX this will wait forever for the ccb to complete */

	mpi_start(sc, ccb);

	mtx_enter(&cookie);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
	mtx_leave(&cookie);

	done(ccb);
}

void
mpi_wait_done(struct mpi_ccb *ccb)
{
	struct mutex *cookie = ccb->ccb_cookie;

	mtx_enter(cookie);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(cookie);
}

void
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpi_softc *sc = link->adapter_softc;
	struct mpi_ccb *ccb;
	struct mpi_ccb_bundle *mcb;
	struct mpi_msg_scsi_io *io;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;

	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;
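
	/* fill in the SCSI IO request frame for this ccb */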
	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = link->target;

	io->cdb_length = xs->cmdlen;
	io->sense_buf_len = sizeof(xs->sense);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	io->lun[0] = htobe16(link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPI_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPI_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPI_SCSIIO_DIR_NONE;
		break;
	}

	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
	    (link->quirks & SDEV_NOTAGS))
		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
	else
		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;

	bcopy(xs->cmd, io->cdb, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));

	if (mpi_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);

	if (xs->flags & SCSI_POLL) {
		if (mpi_poll(sc, ccb, xs->timeout) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
		}
		return;
	}

	mpi_start(sc, ccb);
}

void
mpi_scsi_cmd_done(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	struct mpi_msg_scsi_io_error *sie;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	/* timeout_del */
	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		scsi_done(xs);
		return;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
	    sie->msg_length, sie->function);
	DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
	    sie->sense_buf_len, sie->msg_flags);
	DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->msg_context));
	DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->tag));

	xs->status = sie->scsi_status;
	switch (letoh16(sie->ioc_status)) {
	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - letoh32(sie->transfer_count);
		if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		xs->error = XS_RESET;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
		bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense));

	DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpi_push_reply(sc, ccb->ccb_rcb);
	scsi_done(xs);
}

void
mpi_timeout_xs(void *arg)
{
	/* XXX */
}
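
/*
 * Map the xfer's data buffer and build the scatter/gather list in the
 * request frame.  Segments that do not fit in the frame's inline SGL are
 * continued via a chain element that points at further SGEs elsewhere in
 * the same request frame.
 */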
int
mpi_load_xs(struct mpi_ccb *ccb)
{
	struct mpi_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
	struct mpi_msg_scsi_io *io = &mcb->mcb_io;
	struct mpi_sge *sge, *nsge = &mcb->mcb_sgl[0];
	struct mpi_sge *ce = NULL, *nce;
	u_int64_t ce_dva;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t addr, flags;
	int i, error;

	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPI_SGE_FL_DIR_OUT;

	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
		io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {

		if (nsge == ce) {
			nsge++;
			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);

			DNPRINTF(MPI_D_DMA, "%s: - 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), sge->sg_hdr,
			    sge->sg_hi_addr, sge->sg_lo_addr);

			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
				nce = &nsge[sc->sc_chain_len - 1];
				addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4;
				addr = addr << 16 |
				    sizeof(struct mpi_sge) * sc->sc_chain_len;
			} else {
				nce = NULL;
				addr = sizeof(struct mpi_sge) *
				    (dmap->dm_nsegs - i);
			}

			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
			    MPI_SGE_FL_SIZE_64 | addr);

			ce_dva = ccb->ccb_cmd_dva +
			    ((u_int8_t *)nsge - (u_int8_t *)mcb);

			addr = (u_int32_t)(ce_dva >> 32);
			ce->sg_hi_addr = htole32(addr);
			addr = (u_int32_t)ce_dva;
			ce->sg_lo_addr = htole32(addr);

			DNPRINTF(MPI_D_DMA, "%s: ce: 0x%08x 0x%08x 0x%08x\n",
			    DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr,
			    ce->sg_lo_addr);

			ce = nce;
		}

		DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
		    i, dmap->dm_segs[i].ds_len,
		    (u_int64_t)dmap->dm_segs[i].ds_addr);

		sge = nsge;

		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32);
		sge->sg_hi_addr = htole32(addr);
		addr = (u_int32_t)dmap->dm_segs[i].ds_addr;
		sge->sg_lo_addr = htole32(addr);

		DNPRINTF(MPI_D_DMA, "%s: %d: 0x%08x 0x%08x 0x%08x\n",
		    DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr,
		    sge->sg_lo_addr);

		nsge = sge + 1;
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
	    MPI_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

void
mpi_minphys(struct buf *bp, struct scsi_link *sl)
{
	/* XXX */
	if (bp->b_bcount > MAXPHYS)
		bp->b_bcount = MAXPHYS;
	minphys(bp);
}

int
mpi_scsi_probe_virtual(struct scsi_link *link)
{
	struct mpi_softc *sc = link->adapter_softc;
	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_raid_vol_pg0 *rp0;
	int len;
	int rv;

	if (!ISSET(sc->sc_flags, MPI_F_RAID))
		return (0);

	if (link->lun > 0)
		return (0);

	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
	    0, link->target, MPI_PG_POLL, &hdr);
	if (rv != 0)
		return (0);

	len = hdr.page_length * 4;
	rp0 = malloc(len, M_TEMP, M_NOWAIT);
	if (rp0 == NULL)
		return (ENOMEM);

	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
	if (rv == 0)
		SET(link->flags, SDEV_VIRTUAL);

	free(rp0, M_TEMP);
	return (0);
}

int
mpi_scsi_probe(struct scsi_link *link)
{
	struct mpi_softc *sc = link->adapter_softc;
	struct mpi_ecfg_hdr ehdr;
	struct mpi_cfg_sas_dev_pg0 pg0;
	u_int32_t address;
	int rv;

	rv = mpi_scsi_probe_virtual(link);
	if (rv != 0)
		return (rv);

	if (ISSET(link->flags, SDEV_VIRTUAL))
		return (0);

	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
		return (0);

	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;

	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
	    address, &ehdr) != 0)
		return (EIO);

	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
	    DEVNAME(sc), link->target);
	DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n",
	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
	DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc),
	    letoh64(pg0.sas_addr));
	DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x "
	    "access_status: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
	DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x "
	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
	DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(pg0.device_info));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n",
	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);

	if (ISSET(letoh32(pg0.device_info),
	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
		    DEVNAME(sc), link->target);
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
	}

	return (0);
}

u_int32_t
mpi_read(struct mpi_softc *sc, bus_size_t r)
{
	u_int32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}

void
mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

int
mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) == target)
			return (0);
		delay(1000);
	}

	return (1);
}

int
mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int i;

	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 10000; i++) {
		if ((mpi_read(sc, r) & mask) != target)
			return (0);
		delay(1000);
	}

	return (1);
}
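
/*
 * Bring the IOC to the READY state: wait for it to leave RESET, and if a
 * previous driver left it OPERATIONAL or FAULTed, reset it (soft reset
 * first, hard reset as a fallback).
 */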
int
mpi_init(struct mpi_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the IOC leaves the RESET state */
	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpi_read_db(sc);
	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPI_DOORBELL_STATE) {
		case MPI_DOORBELL_STATE_READY:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPI_DOORBELL_STATE_OPER:
		case MPI_DOORBELL_STATE_FAULT:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
			    "reset\n", DEVNAME(sc));
			if (mpi_reset_soft(sc) != 0)
				mpi_reset_hard(sc);
			break;

		case MPI_DOORBELL_STATE_RESET:
			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
			    MPI_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpi_read_db(sc);
	}

	return (1);
}

int
mpi_reset_soft(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));

	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	mpi_write_db(sc,
	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
		return (1);

	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_READY) != 0)
		return (1);

	return (0);
}

int
mpi_reset_hard(struct mpi_softc *sc)
{
	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));

	/* enable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);

	/* reset ioc */
	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);

	delay(10000);

	/* disable diagnostic register */
	mpi_write(sc, MPI_WRITESEQ, 0xff);

	/* restore pci bits? */

	/* firmware bits? */

	return (0);
}
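
/*
 * Handshake messages (used before the request/reply queues are set up) are
 * exchanged a dword at a time through the doorbell register, with the
 * doorbell and IOC interrupt status bits providing the flow control.
 */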
int
mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
		mpi_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
	    MPI_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. Wait for the interrupt and then ack it.
	 */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpi_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpi_write_db(sc, htole32(query[i]));
		if (mpi_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}

int
mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpi_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
		mpi_write_intr(sc, 0);
	}

	return (0);
}

int
mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
{
	struct mpi_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpi_wait_db_int(sc) != 0)
		return (1);
	mpi_write_intr(sc, 0);

	return (0);
}

void
mpi_empty_done(struct mpi_ccb *ccb)
{
	/* nothing to do */
}
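
/*
 * IOC FACTS is fetched over the doorbell handshake; its reply tells us how
 * many commands the IOC will accept, the request frame size and the reply
 * queue depth, all of which size the ccb and reply allocations made later
 * in attach.
 */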
int
mpi_iocfacts(struct mpi_softc *sc)
{
	struct mpi_msg_iocfacts_request ifq;
	struct mpi_msg_iocfacts_reply ifp;

	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));

	bzero(&ifq, sizeof(ifq));
	bzero(&ifp, sizeof(ifp));

	ifq.function = MPI_FUNCTION_IOC_FACTS;
	ifq.chain_offset = 0;
	ifq.msg_flags = 0;
	ifq.msg_context = htole32(0xdeadbeef);

	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
	    DEVNAME(sc), ifp.function, ifp.msg_length,
	    ifp.msg_version_maj, ifp.msg_version_min);
	DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
	    ifp.ioc_number, ifp.header_version_maj,
	    ifp.header_version_min);
	DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.msg_context));
	DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
	    DEVNAME(sc), letoh16(ifp.ioc_status),
	    letoh16(ifp.ioc_exceptions));
	DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_loginfo));
	DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
	DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
	    DEVNAME(sc), letoh16(ifp.request_frame_size),
	    letoh16(ifp.reply_queue_depth));
	DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
	    letoh16(ifp.product_id));
	DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_host_mfa_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
	    "global_credits: %d\n",
	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
	    letoh16(ifp.global_credits));
	DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.current_sense_buffer_hi_addr));
	DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
	    letoh16(ifp.current_reply_frame_size));
	DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
	    letoh32(ifp.fw_image_size));
	DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
	    letoh32(ifp.ioc_capabilities));
	DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
	    ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev);
2035 ifp.fw_version_maj, ifp.fw_version_min, 2036 ifp.fw_version_unit, ifp.fw_version_dev); 2037 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n", 2038 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth)); 2039 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x " 2040 "addr 0x%08x %08x\n", DEVNAME(sc), 2041 letoh32(ifp.host_page_buffer_sge.sg_hdr), 2042 letoh32(ifp.host_page_buffer_sge.sg_hi_addr), 2043 letoh32(ifp.host_page_buffer_sge.sg_lo_addr)); 2044 2045 sc->sc_maxcmds = letoh16(ifp.global_credits); 2046 sc->sc_maxchdepth = ifp.max_chain_depth; 2047 sc->sc_ioc_number = ifp.ioc_number; 2048 if (sc->sc_flags & MPI_F_SPI) 2049 sc->sc_buswidth = 16; 2050 else 2051 sc->sc_buswidth = 2052 (ifp.max_devices == 0) ? 256 : ifp.max_devices; 2053 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 2054 sc->sc_fw_len = letoh32(ifp.fw_image_size); 2055 2056 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, letoh16(ifp.reply_queue_depth)); 2057 2058 /* 2059 * you can fit sg elements on the end of the io cmd if they fit in the 2060 * request frame size. 2061 */ 2062 sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) - 2063 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge); 2064 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc), 2065 sc->sc_first_sgl_len); 2066 2067 sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) / 2068 sizeof(struct mpi_sge); 2069 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc), 2070 sc->sc_chain_len); 2071 2072 /* the sgl tailing the io cmd loses an entry to the chain element. */ 2073 sc->sc_max_sgl_len = MPI_MAX_SGL - 1; 2074 /* the sgl chains lose an entry for each chain element */ 2075 sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) / 2076 sc->sc_chain_len; 2077 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc), 2078 sc->sc_max_sgl_len); 2079 2080 /* XXX we're ignoring the max chain depth */ 2081 2082 return (0); 2083 } 2084 2085 int 2086 mpi_iocinit(struct mpi_softc *sc) 2087 { 2088 struct mpi_msg_iocinit_request iiq; 2089 struct mpi_msg_iocinit_reply iip; 2090 u_int32_t hi_addr; 2091 2092 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc)); 2093 2094 bzero(&iiq, sizeof(iiq)); 2095 bzero(&iip, sizeof(iip)); 2096 2097 iiq.function = MPI_FUNCTION_IOC_INIT; 2098 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER; 2099 2100 iiq.max_devices = (sc->sc_buswidth == 256) ? 
0 : sc->sc_buswidth; 2101 iiq.max_buses = 1; 2102 2103 iiq.msg_context = htole32(0xd00fd00f); 2104 2105 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE); 2106 2107 hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32); 2108 iiq.host_mfa_hi_addr = htole32(hi_addr); 2109 iiq.sense_buffer_hi_addr = htole32(hi_addr); 2110 2111 iiq.msg_version_maj = 0x01; 2112 iiq.msg_version_min = 0x02; 2113 2114 iiq.hdr_version_unit = 0x0d; 2115 iiq.hdr_version_dev = 0x00; 2116 2117 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 2118 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n", 2119 DEVNAME(sc)); 2120 return (1); 2121 } 2122 2123 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 2124 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n", 2125 DEVNAME(sc)); 2126 return (1); 2127 } 2128 2129 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d " 2130 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 2131 iip.msg_length, iip.whoinit); 2132 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d " 2133 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags, 2134 iip.max_buses, iip.max_devices, iip.flags); 2135 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2136 letoh32(iip.msg_context)); 2137 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2138 letoh16(iip.ioc_status)); 2139 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2140 letoh32(iip.ioc_loginfo)); 2141 2142 return (0); 2143 } 2144 2145 int 2146 mpi_portfacts(struct mpi_softc *sc) 2147 { 2148 struct mpi_ccb *ccb; 2149 struct mpi_msg_portfacts_request *pfq; 2150 volatile struct mpi_msg_portfacts_reply *pfp; 2151 int rv = 1; 2152 2153 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc)); 2154 2155 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2156 if (ccb == NULL) { 2157 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n", 2158 DEVNAME(sc)); 2159 return (rv); 2160 } 2161 2162 ccb->ccb_done = mpi_empty_done; 2163 pfq = ccb->ccb_cmd; 2164 2165 pfq->function = MPI_FUNCTION_PORT_FACTS; 2166 pfq->chain_offset = 0; 2167 pfq->msg_flags = 0; 2168 pfq->port_number = 0; 2169 pfq->msg_context = htole32(ccb->ccb_id); 2170 2171 if (mpi_poll(sc, ccb, 50000) != 0) { 2172 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc)); 2173 goto err; 2174 } 2175 2176 if (ccb->ccb_rcb == NULL) { 2177 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n", 2178 DEVNAME(sc)); 2179 goto err; 2180 } 2181 pfp = ccb->ccb_rcb->rcb_reply; 2182 2183 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n", 2184 DEVNAME(sc), pfp->function, pfp->msg_length); 2185 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n", 2186 DEVNAME(sc), pfp->msg_flags, pfp->port_number); 2187 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2188 letoh32(pfp->msg_context)); 2189 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2190 letoh16(pfp->ioc_status)); 2191 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2192 letoh32(pfp->ioc_loginfo)); 2193 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n", 2194 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type); 2195 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n", 2196 DEVNAME(sc), letoh16(pfp->protocol_flags), 2197 letoh16(pfp->port_scsi_id)); 2198 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d " 2199 "max_posted_cmd_buffers: %d\n", DEVNAME(sc), 2200 letoh16(pfp->max_persistent_ids), 2201 letoh16(pfp->max_posted_cmd_buffers)); 2202 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: 
%d\n", DEVNAME(sc), 2203 letoh16(pfp->max_lan_buckets)); 2204 2205 sc->sc_porttype = pfp->port_type; 2206 if (sc->sc_target == -1) 2207 sc->sc_target = letoh16(pfp->port_scsi_id); 2208 2209 mpi_push_reply(sc, ccb->ccb_rcb); 2210 rv = 0; 2211 err: 2212 scsi_io_put(&sc->sc_iopool, ccb); 2213 2214 return (rv); 2215 } 2216 2217 int 2218 mpi_cfg_coalescing(struct mpi_softc *sc) 2219 { 2220 struct mpi_cfg_hdr hdr; 2221 struct mpi_cfg_ioc_pg1 pg; 2222 u_int32_t flags; 2223 2224 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) { 2225 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n", 2226 DEVNAME(sc)); 2227 return (1); 2228 } 2229 2230 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) { 2231 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n", 2232 DEVNAME(sc)); 2233 return (1); 2234 } 2235 2236 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc)); 2237 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc), 2238 letoh32(pg.flags)); 2239 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc), 2240 letoh32(pg.coalescing_timeout)); 2241 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n", 2242 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num); 2243 2244 flags = letoh32(pg.flags); 2245 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)) 2246 return (0); 2247 2248 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING)); 2249 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) { 2250 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n", 2251 DEVNAME(sc)); 2252 return (1); 2253 } 2254 2255 return (0); 2256 } 2257 2258 int 2259 mpi_eventnotify(struct mpi_softc *sc) 2260 { 2261 struct mpi_ccb *ccb; 2262 struct mpi_msg_event_request *enq; 2263 2264 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2265 if (ccb == NULL) { 2266 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n", 2267 DEVNAME(sc)); 2268 return (1); 2269 } 2270 2271 sc->sc_evt_ccb = ccb; 2272 SIMPLEQ_INIT(&sc->sc_evt_ack_queue); 2273 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO); 2274 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool, 2275 mpi_eventack, sc); 2276 2277 ccb->ccb_done = mpi_eventnotify_done; 2278 enq = ccb->ccb_cmd; 2279 2280 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION; 2281 enq->chain_offset = 0; 2282 enq->event_switch = MPI_EVENT_SWITCH_ON; 2283 enq->msg_context = htole32(ccb->ccb_id); 2284 2285 mpi_start(sc, ccb); 2286 return (0); 2287 } 2288 2289 void 2290 mpi_eventnotify_done(struct mpi_ccb *ccb) 2291 { 2292 struct mpi_softc *sc = ccb->ccb_sc; 2293 struct mpi_rcb *rcb = ccb->ccb_rcb; 2294 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2295 2296 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc)); 2297 2298 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d " 2299 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length, 2300 letoh16(enp->data_length)); 2301 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n", 2302 DEVNAME(sc), enp->ack_required, enp->msg_flags); 2303 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2304 letoh32(enp->msg_context)); 2305 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2306 letoh16(enp->ioc_status)); 2307 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2308 letoh32(enp->ioc_loginfo)); 2309 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc), 2310 letoh32(enp->event)); 2311 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc), 2312 letoh32(enp->event_context)); 2313 2314 switch (letoh32(enp->event)) { 2315 /* ignore these */ 
2316 case MPI_EVENT_EVENT_CHANGE: 2317 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2318 break; 2319 2320 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2321 if (sc->sc_scsibus == NULL) 2322 break; 2323 2324 if (mpi_evt_sas(sc, rcb) != 0) { 2325 /* reply is freed later on */ 2326 return; 2327 } 2328 break; 2329 2330 case MPI_EVENT_RESCAN: 2331 if (sc->sc_scsibus != NULL && 2332 sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC) 2333 mpi_evt_fc_rescan(sc); 2334 break; 2335 2336 default: 2337 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n", 2338 DEVNAME(sc), letoh32(enp->event)); 2339 break; 2340 } 2341 2342 mpi_eventnotify_free(sc, rcb); 2343 } 2344 2345 void 2346 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb) 2347 { 2348 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2349 2350 if (enp->ack_required) { 2351 mtx_enter(&sc->sc_evt_ack_mtx); 2352 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link); 2353 mtx_leave(&sc->sc_evt_ack_mtx); 2354 scsi_ioh_add(&sc->sc_evt_ack_handler); 2355 } else 2356 mpi_push_reply(sc, rcb); 2357 } 2358 2359 int 2360 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb) 2361 { 2362 struct mpi_evt_sas_change *ch; 2363 u_int8_t *data; 2364 2365 data = rcb->rcb_reply; 2366 data += sizeof(struct mpi_msg_event_reply); 2367 ch = (struct mpi_evt_sas_change *)data; 2368 2369 if (ch->bus != 0) 2370 return (0); 2371 2372 switch (ch->reason) { 2373 case MPI_EVT_SASCH_REASON_ADDED: 2374 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED: 2375 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) { 2376 printf("%s: unable to request attach of %d\n", 2377 DEVNAME(sc), ch->target); 2378 } 2379 break; 2380 2381 case MPI_EVT_SASCH_REASON_NOT_RESPONDING: 2382 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE); 2383 2384 mtx_enter(&sc->sc_evt_scan_mtx); 2385 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link); 2386 mtx_leave(&sc->sc_evt_scan_mtx); 2387 scsi_ioh_add(&sc->sc_evt_scan_handler); 2388 2389 /* we'll handle event ack later on */ 2390 return (1); 2391 2392 case MPI_EVT_SASCH_REASON_SMART_DATA: 2393 case MPI_EVT_SASCH_REASON_UNSUPPORTED: 2394 case MPI_EVT_SASCH_REASON_INTERNAL_RESET: 2395 break; 2396 default: 2397 printf("%s: unknown reason for SAS device status change: " 2398 "0x%02x\n", DEVNAME(sc), ch->reason); 2399 break; 2400 } 2401 2402 return (0); 2403 } 2404 2405 void 2406 mpi_evt_sas_detach(void *cookie, void *io) 2407 { 2408 struct mpi_softc *sc = cookie; 2409 struct mpi_ccb *ccb = io; 2410 struct mpi_rcb *rcb, *next; 2411 struct mpi_msg_event_reply *enp; 2412 struct mpi_evt_sas_change *ch; 2413 struct mpi_msg_scsi_task_request *str; 2414 2415 DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc)); 2416 2417 mtx_enter(&sc->sc_evt_scan_mtx); 2418 rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue); 2419 if (rcb != NULL) { 2420 next = SIMPLEQ_NEXT(rcb, rcb_link); 2421 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link); 2422 } 2423 mtx_leave(&sc->sc_evt_scan_mtx); 2424 2425 if (rcb == NULL) { 2426 scsi_io_put(&sc->sc_iopool, ccb); 2427 return; 2428 } 2429 2430 enp = rcb->rcb_reply; 2431 ch = (struct mpi_evt_sas_change *)(enp + 1); 2432 2433 ccb->ccb_done = mpi_evt_sas_detach_done; 2434 str = ccb->ccb_cmd; 2435 2436 str->target_id = ch->target; 2437 str->bus = 0; 2438 str->function = MPI_FUNCTION_SCSI_TASK_MGMT; 2439 2440 str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET; 2441 2442 str->msg_context = htole32(ccb->ccb_id); 2443 2444 mpi_eventnotify_free(sc, rcb); 2445 2446 mpi_start(sc, ccb); 2447 2448 if (next != NULL) 2449 
scsi_ioh_add(&sc->sc_evt_scan_handler); 2450 } 2451 2452 void 2453 mpi_evt_sas_detach_done(struct mpi_ccb *ccb) 2454 { 2455 struct mpi_softc *sc = ccb->ccb_sc; 2456 struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply; 2457 2458 if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1, 2459 DETACH_FORCE) != 0) { 2460 printf("%s: unable to request detach of %d\n", 2461 DEVNAME(sc), r->target_id); 2462 } 2463 2464 mpi_push_reply(sc, ccb->ccb_rcb); 2465 scsi_io_put(&sc->sc_iopool, ccb); 2466 } 2467 2468 void 2469 mpi_evt_fc_rescan(struct mpi_softc *sc) 2470 { 2471 int queue = 1; 2472 2473 mtx_enter(&sc->sc_evt_rescan_mtx); 2474 if (sc->sc_evt_rescan_sem) 2475 queue = 0; 2476 else 2477 sc->sc_evt_rescan_sem = 1; 2478 mtx_leave(&sc->sc_evt_rescan_mtx); 2479 2480 if (queue) { 2481 workq_queue_task(NULL, &sc->sc_evt_rescan, 0, 2482 mpi_fc_rescan, sc, NULL); 2483 } 2484 } 2485 2486 void 2487 mpi_fc_rescan(void *xsc, void *xarg) 2488 { 2489 struct mpi_softc *sc = xsc; 2490 struct mpi_cfg_hdr hdr; 2491 struct mpi_cfg_fc_device_pg0 pg; 2492 struct scsi_link *link; 2493 u_int8_t devmap[256 / NBBY]; 2494 u_int32_t id = 0xffffff; 2495 int i; 2496 2497 mtx_enter(&sc->sc_evt_rescan_mtx); 2498 sc->sc_evt_rescan_sem = 0; 2499 mtx_leave(&sc->sc_evt_rescan_mtx); 2500 2501 bzero(devmap, sizeof(devmap)); 2502 2503 do { 2504 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0, 2505 id, 0, &hdr) != 0) { 2506 printf("%s: header get for rescan of 0x%08x failed\n", 2507 DEVNAME(sc), id); 2508 return; 2509 } 2510 2511 bzero(&pg, sizeof(pg)); 2512 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0) 2513 break; 2514 2515 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) && 2516 pg.current_bus == 0) 2517 setbit(devmap, pg.current_target_id); 2518 2519 id = htole32(pg.port_id); 2520 } while (id <= 0xff0000); 2521 2522 for (i = 0; i < sc->sc_buswidth; i++) { 2523 link = scsi_get_link(sc->sc_scsibus, i, 0); 2524 2525 if (isset(devmap, i)) { 2526 if (link == NULL) 2527 scsi_probe_target(sc->sc_scsibus, i); 2528 } else { 2529 if (link != NULL) { 2530 scsi_activate(sc->sc_scsibus, i, -1, 2531 DVACT_DEACTIVATE); 2532 scsi_detach_target(sc->sc_scsibus, i, 2533 DETACH_FORCE); 2534 } 2535 } 2536 } 2537 } 2538 2539 void 2540 mpi_eventack(void *cookie, void *io) 2541 { 2542 struct mpi_softc *sc = cookie; 2543 struct mpi_ccb *ccb = io; 2544 struct mpi_rcb *rcb, *next; 2545 struct mpi_msg_event_reply *enp; 2546 struct mpi_msg_eventack_request *eaq; 2547 2548 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc)); 2549 2550 mtx_enter(&sc->sc_evt_ack_mtx); 2551 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 2552 if (rcb != NULL) { 2553 next = SIMPLEQ_NEXT(rcb, rcb_link); 2554 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 2555 } 2556 mtx_leave(&sc->sc_evt_ack_mtx); 2557 2558 if (rcb == NULL) { 2559 scsi_io_put(&sc->sc_iopool, ccb); 2560 return; 2561 } 2562 2563 enp = rcb->rcb_reply; 2564 2565 ccb->ccb_done = mpi_eventack_done; 2566 eaq = ccb->ccb_cmd; 2567 2568 eaq->function = MPI_FUNCTION_EVENT_ACK; 2569 eaq->msg_context = htole32(ccb->ccb_id); 2570 2571 eaq->event = enp->event; 2572 eaq->event_context = enp->event_context; 2573 2574 mpi_push_reply(sc, rcb); 2575 mpi_start(sc, ccb); 2576 2577 if (next != NULL) 2578 scsi_ioh_add(&sc->sc_evt_ack_handler); 2579 } 2580 2581 void 2582 mpi_eventack_done(struct mpi_ccb *ccb) 2583 { 2584 struct mpi_softc *sc = ccb->ccb_sc; 2585 2586 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 2587 2588 mpi_push_reply(sc, ccb->ccb_rcb); 2589 
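/*
 * the ack exchange for this event is done: the reply frame goes back
 * on the reply free queue and the ccb returns to the iopool.  any
 * further pending acks were already rescheduled by mpi_eventack()
 * via scsi_ioh_add().
 */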
scsi_io_put(&sc->sc_iopool, ccb); 2590 } 2591 2592 int 2593 mpi_portenable(struct mpi_softc *sc) 2594 { 2595 struct mpi_ccb *ccb; 2596 struct mpi_msg_portenable_request *peq; 2597 int rv = 0; 2598 2599 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2600 2601 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2602 if (ccb == NULL) { 2603 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2604 DEVNAME(sc)); 2605 return (1); 2606 } 2607 2608 ccb->ccb_done = mpi_empty_done; 2609 peq = ccb->ccb_cmd; 2610 2611 peq->function = MPI_FUNCTION_PORT_ENABLE; 2612 peq->port_number = 0; 2613 peq->msg_context = htole32(ccb->ccb_id); 2614 2615 if (mpi_poll(sc, ccb, 50000) != 0) { 2616 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2617 return (1); 2618 } 2619 2620 if (ccb->ccb_rcb == NULL) { 2621 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2622 DEVNAME(sc)); 2623 rv = 1; 2624 } else 2625 mpi_push_reply(sc, ccb->ccb_rcb); 2626 2627 scsi_io_put(&sc->sc_iopool, ccb); 2628 2629 return (rv); 2630 } 2631 2632 int 2633 mpi_fwupload(struct mpi_softc *sc) 2634 { 2635 struct mpi_ccb *ccb; 2636 struct { 2637 struct mpi_msg_fwupload_request req; 2638 struct mpi_sge sge; 2639 } __packed *bundle; 2640 struct mpi_msg_fwupload_reply *upp; 2641 u_int64_t addr; 2642 int rv = 0; 2643 2644 if (sc->sc_fw_len == 0) 2645 return (0); 2646 2647 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2648 2649 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2650 if (sc->sc_fw == NULL) { 2651 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n", 2652 DEVNAME(sc), sc->sc_fw_len); 2653 return (1); 2654 } 2655 2656 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2657 if (ccb == NULL) { 2658 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n", 2659 DEVNAME(sc)); 2660 goto err; 2661 } 2662 2663 ccb->ccb_done = mpi_empty_done; 2664 bundle = ccb->ccb_cmd; 2665 2666 bundle->req.function = MPI_FUNCTION_FW_UPLOAD; 2667 bundle->req.msg_context = htole32(ccb->ccb_id); 2668 2669 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW; 2670 2671 bundle->req.tce.details_length = 12; 2672 bundle->req.tce.image_size = htole32(sc->sc_fw_len); 2673 2674 bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2675 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 2676 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len); 2677 addr = MPI_DMA_DVA(sc->sc_fw); 2678 bundle->sge.sg_hi_addr = htole32((u_int32_t)(addr >> 32)); 2679 bundle->sge.sg_lo_addr = htole32((u_int32_t)addr); 2680 2681 if (mpi_poll(sc, ccb, 50000) != 0) { 2682 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc)); 2683 goto err; 2684 } 2685 2686 if (ccb->ccb_rcb == NULL) 2687 panic("%s: unable to do fw upload", DEVNAME(sc)); 2688 upp = ccb->ccb_rcb->rcb_reply; 2689 2690 if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2691 rv = 1; 2692 2693 mpi_push_reply(sc, ccb->ccb_rcb); 2694 scsi_io_put(&sc->sc_iopool, ccb); 2695 2696 return (rv); 2697 2698 err: 2699 mpi_dmamem_free(sc, sc->sc_fw); 2700 return (1); 2701 } 2702 2703 void 2704 mpi_get_raid(struct mpi_softc *sc) 2705 { 2706 struct mpi_cfg_hdr hdr; 2707 struct mpi_cfg_ioc_pg2 *vol_page; 2708 size_t pagelen; 2709 u_int32_t capabilities; 2710 2711 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc)); 2712 2713 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) { 2714 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header" 2715 "for IOC page 2\n", DEVNAME(sc)); 2716 return; 2717 } 2718 2719 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2720 
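/*
 * page_length in the header is counted in dwords and IOC page 2 is
 * not a fixed-size structure, so size the buffer from the header
 * rather than from sizeof.
 */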
vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2721 if (vol_page == NULL) { 2722 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate " 2723 "space for ioc config page 2\n", DEVNAME(sc)); 2724 return; 2725 } 2726 2727 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) { 2728 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC " 2729 "page 2\n", DEVNAME(sc)); 2730 goto out; 2731 } 2732 2733 capabilities = letoh32(vol_page->capabilities); 2734 2735 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x08%x\n", DEVNAME(sc), 2736 letoh32(vol_page->capabilities)); 2737 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d " 2738 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2739 vol_page->active_vols, vol_page->max_vols, 2740 vol_page->active_physdisks, vol_page->max_physdisks); 2741 2742 /* don't walk list if there are no RAID capability */ 2743 if (capabilities == 0xdeadbeef) { 2744 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)); 2745 goto out; 2746 } 2747 2748 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)) 2749 sc->sc_flags |= MPI_F_RAID; 2750 2751 out: 2752 free(vol_page, M_TEMP); 2753 } 2754 2755 int 2756 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number, 2757 u_int32_t address, int flags, void *p) 2758 { 2759 struct mpi_ccb *ccb; 2760 struct mpi_msg_config_request *cq; 2761 struct mpi_msg_config_reply *cp; 2762 struct mpi_cfg_hdr *hdr = p; 2763 struct mpi_ecfg_hdr *ehdr = p; 2764 int etype = 0; 2765 int rv = 0; 2766 2767 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x " 2768 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2769 address, flags, MPI_PG_FMT); 2770 2771 ccb = scsi_io_get(&sc->sc_iopool, 2772 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0); 2773 if (ccb == NULL) { 2774 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n", 2775 DEVNAME(sc)); 2776 return (1); 2777 } 2778 2779 if (ISSET(flags, MPI_PG_EXTENDED)) { 2780 etype = type; 2781 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2782 } 2783 2784 cq = ccb->ccb_cmd; 2785 2786 cq->function = MPI_FUNCTION_CONFIG; 2787 cq->msg_context = htole32(ccb->ccb_id); 2788 2789 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER; 2790 2791 cq->config_header.page_number = number; 2792 cq->config_header.page_type = type; 2793 cq->ext_page_type = etype; 2794 cq->page_address = htole32(address); 2795 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2796 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 2797 2798 ccb->ccb_done = mpi_empty_done; 2799 if (ISSET(flags, MPI_PG_POLL)) { 2800 if (mpi_poll(sc, ccb, 50000) != 0) { 2801 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2802 DEVNAME(sc)); 2803 return (1); 2804 } 2805 } else 2806 mpi_wait(sc, ccb); 2807 2808 if (ccb->ccb_rcb == NULL) 2809 panic("%s: unable to fetch config header", DEVNAME(sc)); 2810 cp = ccb->ccb_rcb->rcb_reply; 2811 2812 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2813 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2814 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2815 "msg_flags: 0x%02x\n", DEVNAME(sc), 2816 letoh16(cp->ext_page_length), cp->ext_page_type, 2817 cp->msg_flags); 2818 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2819 letoh32(cp->msg_context)); 2820 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2821 letoh16(cp->ioc_status)); 2822 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2823 letoh32(cp->ioc_loginfo)); 2824 DNPRINTF(MPI_D_MISC, "%s: 
page_version: 0x%02x page_length: %d " 2825 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2826 cp->config_header.page_version, 2827 cp->config_header.page_length, 2828 cp->config_header.page_number, 2829 cp->config_header.page_type); 2830 2831 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2832 rv = 1; 2833 else if (ISSET(flags, MPI_PG_EXTENDED)) { 2834 bzero(ehdr, sizeof(*ehdr)); 2835 ehdr->page_version = cp->config_header.page_version; 2836 ehdr->page_number = cp->config_header.page_number; 2837 ehdr->page_type = cp->config_header.page_type; 2838 ehdr->ext_page_length = cp->ext_page_length; 2839 ehdr->ext_page_type = cp->ext_page_type; 2840 } else 2841 *hdr = cp->config_header; 2842 2843 mpi_push_reply(sc, ccb->ccb_rcb); 2844 scsi_io_put(&sc->sc_iopool, ccb); 2845 2846 return (rv); 2847 } 2848 2849 int 2850 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags, 2851 void *p, int read, void *page, size_t len) 2852 { 2853 struct mpi_ccb *ccb; 2854 struct mpi_msg_config_request *cq; 2855 struct mpi_msg_config_reply *cp; 2856 struct mpi_cfg_hdr *hdr = p; 2857 struct mpi_ecfg_hdr *ehdr = p; 2858 u_int64_t dva; 2859 char *kva; 2860 int page_length; 2861 int rv = 0; 2862 2863 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n", 2864 DEVNAME(sc), address, read, hdr->page_type); 2865 2866 page_length = ISSET(flags, MPI_PG_EXTENDED) ? 2867 letoh16(ehdr->ext_page_length) : hdr->page_length; 2868 2869 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) || 2870 len < page_length * 4) 2871 return (1); 2872 2873 ccb = scsi_io_get(&sc->sc_iopool, 2874 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0); 2875 if (ccb == NULL) { 2876 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2877 return (1); 2878 } 2879 2880 cq = ccb->ccb_cmd; 2881 2882 cq->function = MPI_FUNCTION_CONFIG; 2883 cq->msg_context = htole32(ccb->ccb_id); 2884 2885 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2886 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2887 2888 if (ISSET(flags, MPI_PG_EXTENDED)) { 2889 cq->config_header.page_version = ehdr->page_version; 2890 cq->config_header.page_number = ehdr->page_number; 2891 cq->config_header.page_type = ehdr->page_type; 2892 cq->ext_page_len = ehdr->ext_page_length; 2893 cq->ext_page_type = ehdr->ext_page_type; 2894 } else 2895 cq->config_header = *hdr; 2896 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2897 cq->page_address = htole32(address); 2898 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2899 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2900 (page_length * 4) | 2901 (read ? 
MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2902 2903 /* bounce the page via the request space to avoid more bus_dma games */ 2904 dva = ccb->ccb_cmd_dva + sizeof(struct mpi_msg_config_request); 2905 2906 cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32)); 2907 cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva); 2908 2909 kva = ccb->ccb_cmd; 2910 kva += sizeof(struct mpi_msg_config_request); 2911 if (!read) 2912 bcopy(page, kva, len); 2913 2914 ccb->ccb_done = mpi_empty_done; 2915 if (ISSET(flags, MPI_PG_POLL)) { 2916 if (mpi_poll(sc, ccb, 50000) != 0) { 2917 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2918 DEVNAME(sc)); 2919 return (1); 2920 } 2921 } else 2922 mpi_wait(sc, ccb); 2923 2924 if (ccb->ccb_rcb == NULL) { 2925 scsi_io_put(&sc->sc_iopool, ccb); 2926 return (1); 2927 } 2928 cp = ccb->ccb_rcb->rcb_reply; 2929 2930 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2931 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2932 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2933 "msg_flags: 0x%02x\n", DEVNAME(sc), 2934 letoh16(cp->ext_page_length), cp->ext_page_type, 2935 cp->msg_flags); 2936 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2937 letoh32(cp->msg_context)); 2938 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2939 letoh16(cp->ioc_status)); 2940 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2941 letoh32(cp->ioc_loginfo)); 2942 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2943 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2944 cp->config_header.page_version, 2945 cp->config_header.page_length, 2946 cp->config_header.page_number, 2947 cp->config_header.page_type); 2948 2949 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2950 rv = 1; 2951 else if (read) 2952 bcopy(kva, page, len); 2953 2954 mpi_push_reply(sc, ccb->ccb_rcb); 2955 scsi_io_put(&sc->sc_iopool, ccb); 2956 2957 return (rv); 2958 } 2959 2960 int 2961 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 2962 { 2963 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2964 2965 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc)); 2966 2967 switch (cmd) { 2968 case DIOCGCACHE: 2969 case DIOCSCACHE: 2970 if (ISSET(link->flags, SDEV_VIRTUAL)) { 2971 return (mpi_ioctl_cache(link, cmd, 2972 (struct dk_cache *)addr)); 2973 } 2974 break; 2975 2976 default: 2977 if (sc->sc_ioctl) 2978 return (sc->sc_ioctl(link->adapter_softc, cmd, addr)); 2979 2980 break; 2981 } 2982 2983 return (ENOTTY); 2984 } 2985 2986 int 2987 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 2988 { 2989 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2990 struct mpi_ccb *ccb; 2991 int len, rv; 2992 struct mpi_cfg_hdr hdr; 2993 struct mpi_cfg_raid_vol_pg0 *rpg0; 2994 int enabled; 2995 struct mpi_msg_raid_action_request *req; 2996 struct mpi_msg_raid_action_reply *rep; 2997 struct mpi_raid_settings settings; 2998 2999 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3000 link->target, MPI_PG_POLL, &hdr); 3001 if (rv != 0) 3002 return (EIO); 3003 3004 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks * 3005 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3006 rpg0 = malloc(len, M_TEMP, M_NOWAIT); 3007 if (rpg0 == NULL) 3008 return (ENOMEM); 3009 3010 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, 3011 rpg0, len) != 0) { 3012 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3013 
DEVNAME(sc)); 3014 rv = EIO; 3015 goto done; 3016 } 3017 3018 enabled = ISSET(letoh16(rpg0->settings.volume_settings), 3019 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0; 3020 3021 if (cmd == DIOCGCACHE) { 3022 dc->wrcache = enabled; 3023 dc->rdcache = 0; 3024 goto done; 3025 } /* else DIOCSCACHE */ 3026 3027 if (dc->rdcache) { 3028 rv = EOPNOTSUPP; 3029 goto done; 3030 } 3031 3032 if (((dc->wrcache) ? 1 : 0) == enabled) 3033 goto done; 3034 3035 settings = rpg0->settings; 3036 if (dc->wrcache) { 3037 SET(settings.volume_settings, 3038 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3039 } else { 3040 CLR(settings.volume_settings, 3041 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3042 } 3043 3044 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 3045 if (ccb == NULL) { 3046 rv = ENOMEM; 3047 goto done; 3048 } 3049 3050 req = ccb->ccb_cmd; 3051 req->function = MPI_FUNCTION_RAID_ACTION; 3052 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS; 3053 req->vol_id = rpg0->volume_id; 3054 req->vol_bus = rpg0->volume_bus; 3055 req->msg_context = htole32(ccb->ccb_id); 3056 3057 memcpy(&req->data_word, &settings, sizeof(req->data_word)); 3058 ccb->ccb_done = mpi_empty_done; 3059 if (mpi_poll(sc, ccb, 50000) != 0) { 3060 rv = EIO; 3061 goto done; 3062 } 3063 3064 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb; 3065 if (rep == NULL) 3066 panic("%s: raid volume settings change failed", DEVNAME(sc)); 3067 3068 switch (letoh16(rep->action_status)) { 3069 case MPI_RAID_ACTION_STATUS_OK: 3070 rv = 0; 3071 break; 3072 default: 3073 rv = EIO; 3074 break; 3075 } 3076 3077 mpi_push_reply(sc, ccb->ccb_rcb); 3078 scsi_io_put(&sc->sc_iopool, ccb); 3079 3080 done: 3081 free(rpg0, M_TEMP); 3082 return (rv); 3083 } 3084 3085 #if NBIO > 0 3086 int 3087 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id) 3088 { 3089 int len, rv = EINVAL; 3090 u_int32_t address; 3091 struct mpi_cfg_hdr hdr; 3092 struct mpi_cfg_raid_vol_pg0 *rpg0; 3093 3094 /* get IOC page 2 */ 3095 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3096 sc->sc_cfg_hdr.page_length * 4) != 0) { 3097 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to " 3098 "fetch IOC page 2\n", DEVNAME(sc)); 3099 goto done; 3100 } 3101 3102 /* XXX return something else than EINVAL to indicate within hs range */ 3103 if (id > sc->sc_vol_page->active_vols) { 3104 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol " 3105 "id: %d\n", DEVNAME(sc), id); 3106 goto done; 3107 } 3108 3109 /* replace current buffer with new one */ 3110 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks * 3111 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3112 rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL); 3113 if (rpg0 == NULL) { 3114 printf("%s: can't get memory for RAID page 0, " 3115 "bio disabled\n", DEVNAME(sc)); 3116 goto done; 3117 } 3118 if (sc->sc_rpg0) 3119 free(sc->sc_rpg0, M_DEVBUF); 3120 sc->sc_rpg0 = rpg0; 3121 3122 /* get raid vol page 0 */ 3123 address = sc->sc_vol_list[id].vol_id | 3124 (sc->sc_vol_list[id].vol_bus << 8); 3125 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3126 address, 0, &hdr) != 0) 3127 goto done; 3128 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) { 3129 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3130 DEVNAME(sc)); 3131 goto done; 3132 } 3133 3134 rv = 0; 3135 done: 3136 return (rv); 3137 } 3138 3139 int 3140 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3141 { 3142 struct mpi_softc *sc = (struct mpi_softc *)dev; 3143 int error = 0; 3144 3145 
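/*
 * bio(4) only talks to us if it attached, ie, sc_ioctl points back at
 * mpi_ioctl.  the adapter rwlock serializes these ioctls against the
 * sensor refresh, which reuses sc_rpg0.
 */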
DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc)); 3146 3147 /* make sure we have bio enabled */ 3148 if (sc->sc_ioctl != mpi_ioctl) 3149 return (EINVAL); 3150 3151 rw_enter_write(&sc->sc_lock); 3152 3153 switch (cmd) { 3154 case BIOCINQ: 3155 DNPRINTF(MPI_D_IOCTL, "inq\n"); 3156 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr); 3157 break; 3158 3159 case BIOCVOL: 3160 DNPRINTF(MPI_D_IOCTL, "vol\n"); 3161 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr); 3162 break; 3163 3164 case BIOCDISK: 3165 DNPRINTF(MPI_D_IOCTL, "disk\n"); 3166 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr); 3167 break; 3168 3169 case BIOCALARM: 3170 DNPRINTF(MPI_D_IOCTL, "alarm\n"); 3171 break; 3172 3173 case BIOCBLINK: 3174 DNPRINTF(MPI_D_IOCTL, "blink\n"); 3175 break; 3176 3177 case BIOCSETSTATE: 3178 DNPRINTF(MPI_D_IOCTL, "setstate\n"); 3179 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr); 3180 break; 3181 3182 default: 3183 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n"); 3184 error = EINVAL; 3185 } 3186 3187 rw_exit_write(&sc->sc_lock); 3188 3189 return (error); 3190 } 3191 3192 int 3193 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi) 3194 { 3195 if (!(sc->sc_flags & MPI_F_RAID)) { 3196 bi->bi_novol = 0; 3197 bi->bi_nodisk = 0; 3198 } 3199 3200 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3201 sc->sc_cfg_hdr.page_length * 4) != 0) { 3202 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC " 3203 "page 2\n", DEVNAME(sc)); 3204 return (EINVAL); 3205 } 3206 3207 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d " 3208 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 3209 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols, 3210 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks); 3211 3212 bi->bi_novol = sc->sc_vol_page->active_vols; 3213 bi->bi_nodisk = sc->sc_vol_page->active_physdisks; 3214 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3215 3216 return (0); 3217 } 3218 3219 int 3220 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv) 3221 { 3222 int i, vol, id, rv = EINVAL; 3223 struct device *dev; 3224 struct scsi_link *link; 3225 struct mpi_cfg_raid_vol_pg0 *rpg0; 3226 char *vendp; 3227 3228 id = bv->bv_volid; 3229 if (mpi_bio_get_pg0_raid(sc, id)) 3230 goto done; 3231 3232 if (id > sc->sc_vol_page->active_vols) 3233 return (EINVAL); /* XXX deal with hot spares */ 3234 3235 rpg0 = sc->sc_rpg0; 3236 if (rpg0 == NULL) 3237 goto done; 3238 3239 /* determine status */ 3240 switch (rpg0->volume_state) { 3241 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3242 bv->bv_status = BIOC_SVONLINE; 3243 break; 3244 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3245 bv->bv_status = BIOC_SVDEGRADED; 3246 break; 3247 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3248 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3249 bv->bv_status = BIOC_SVOFFLINE; 3250 break; 3251 default: 3252 bv->bv_status = BIOC_SVINVALID; 3253 } 3254 3255 /* override status if scrubbing or something */ 3256 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) 3257 bv->bv_status = BIOC_SVREBUILD; 3258 3259 bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512; 3260 3261 switch (sc->sc_vol_list[id].vol_type) { 3262 case MPI_CFG_RAID_TYPE_RAID_IS: 3263 bv->bv_level = 0; 3264 break; 3265 case MPI_CFG_RAID_TYPE_RAID_IME: 3266 case MPI_CFG_RAID_TYPE_RAID_IM: 3267 bv->bv_level = 1; 3268 break; 3269 case MPI_CFG_RAID_TYPE_RAID_5: 3270 bv->bv_level = 5; 3271 break; 3272 case MPI_CFG_RAID_TYPE_RAID_6: 3273 bv->bv_level = 6; 3274 break; 3275 case MPI_CFG_RAID_TYPE_RAID_10: 3276 
bv->bv_level = 10; 3277 break; 3278 case MPI_CFG_RAID_TYPE_RAID_50: 3279 bv->bv_level = 50; 3280 break; 3281 default: 3282 bv->bv_level = -1; 3283 } 3284 3285 bv->bv_nodisk = rpg0->num_phys_disks; 3286 3287 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) { 3288 link = scsi_get_link(sc->sc_scsibus, i, 0); 3289 if (link == NULL) 3290 continue; 3291 3292 /* skip if not a virtual disk */ 3293 if (!(link->flags & SDEV_VIRTUAL)) 3294 continue; 3295 3296 vol++; 3297 /* are we it? */ 3298 if (vol == bv->bv_volid) { 3299 dev = link->device_softc; 3300 vendp = link->inqdata.vendor; 3301 memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor); 3302 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0'; 3303 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev); 3304 break; 3305 } 3306 } 3307 rv = 0; 3308 done: 3309 return (rv); 3310 } 3311 3312 int 3313 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd) 3314 { 3315 int pdid, id, rv = EINVAL; 3316 u_int32_t address; 3317 struct mpi_cfg_hdr hdr; 3318 struct mpi_cfg_raid_vol_pg0 *rpg0; 3319 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk; 3320 struct mpi_cfg_raid_physdisk_pg0 pdpg0; 3321 3322 id = bd->bd_volid; 3323 if (mpi_bio_get_pg0_raid(sc, id)) 3324 goto done; 3325 3326 if (id > sc->sc_vol_page->active_vols) 3327 return (EINVAL); /* XXX deal with hot spares */ 3328 3329 rpg0 = sc->sc_rpg0; 3330 if (rpg0 == NULL) 3331 goto done; 3332 3333 pdid = bd->bd_diskid; 3334 if (pdid > rpg0->num_phys_disks) 3335 goto done; 3336 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1); 3337 physdisk += pdid; 3338 3339 /* get raid phys disk page 0 */ 3340 address = physdisk->phys_disk_num; 3341 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address, 3342 &hdr) != 0) 3343 goto done; 3344 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) { 3345 bd->bd_status = BIOC_SDFAILED; 3346 return (0); 3347 } 3348 bd->bd_channel = pdpg0.phys_disk_bus; 3349 bd->bd_target = pdpg0.phys_disk_id; 3350 bd->bd_lun = 0; 3351 bd->bd_size = (u_quad_t)letoh32(pdpg0.max_lba) * 512; 3352 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor)); 3353 3354 switch (pdpg0.phys_disk_state) { 3355 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE: 3356 bd->bd_status = BIOC_SDONLINE; 3357 break; 3358 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING: 3359 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED: 3360 bd->bd_status = BIOC_SDFAILED; 3361 break; 3362 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL: 3363 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER: 3364 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE: 3365 bd->bd_status = BIOC_SDOFFLINE; 3366 break; 3367 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT: 3368 bd->bd_status = BIOC_SDSCRUB; 3369 break; 3370 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT: 3371 default: 3372 bd->bd_status = BIOC_SDINVALID; 3373 break; 3374 } 3375 3376 /* XXX figure this out */ 3377 /* bd_serial[32]; */ 3378 /* bd_procdev[16]; */ 3379 3380 rv = 0; 3381 done: 3382 return (rv); 3383 } 3384 3385 int 3386 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs) 3387 { 3388 return (ENOTTY); 3389 } 3390 3391 #ifndef SMALL_KERNEL 3392 int 3393 mpi_create_sensors(struct mpi_softc *sc) 3394 { 3395 struct device *dev; 3396 struct scsi_link *link; 3397 int i, vol; 3398 3399 /* count volumes */ 3400 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3401 link = scsi_get_link(sc->sc_scsibus, i, 0); 3402 if (link == NULL) 3403 continue; 3404 /* skip if not a virtual disk */ 3405 if (!(link->flags & SDEV_VIRTUAL)) 3406 continue; 3407 3408 vol++; 3409 } 3410 if (vol == 0) 3411 
return (0); 3412 3413 sc->sc_sensors = malloc(sizeof(struct ksensor) * vol, 3414 M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); 3415 if (sc->sc_sensors == NULL) 3416 return (1); 3417 3418 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3419 sizeof(sc->sc_sensordev.xname)); 3420 3421 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) { 3422 link = scsi_get_link(sc->sc_scsibus, i, 0); 3423 if (link == NULL) 3424 continue; 3425 /* skip if not a virtual disk */ 3426 if (!(link->flags & SDEV_VIRTUAL)) 3427 continue; 3428 3429 dev = link->device_softc; 3430 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname, 3431 sizeof(sc->sc_sensors[vol].desc)); 3432 sc->sc_sensors[vol].type = SENSOR_DRIVE; 3433 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3434 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]); 3435 3436 vol++; 3437 } 3438 3439 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL) 3440 goto bad; 3441 3442 sensordev_install(&sc->sc_sensordev); 3443 3444 return (0); 3445 3446 bad: 3447 free(sc->sc_sensors, M_DEVBUF); 3448 return (1); 3449 } 3450 3451 void 3452 mpi_refresh_sensors(void *arg) 3453 { 3454 int i, vol; 3455 struct scsi_link *link; 3456 struct mpi_softc *sc = arg; 3457 struct mpi_cfg_raid_vol_pg0 *rpg0; 3458 3459 rw_enter_write(&sc->sc_lock); 3460 3461 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3462 link = scsi_get_link(sc->sc_scsibus, i, 0); 3463 if (link == NULL) 3464 continue; 3465 /* skip if not a virtual disk */ 3466 if (!(link->flags & SDEV_VIRTUAL)) 3467 continue; 3468 3469 if (mpi_bio_get_pg0_raid(sc, vol)) 3470 continue; 3471 3472 rpg0 = sc->sc_rpg0; 3473 if (rpg0 == NULL) 3474 goto done; 3475 3476 /* determine status */ 3477 switch (rpg0->volume_state) { 3478 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3479 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE; 3480 sc->sc_sensors[vol].status = SENSOR_S_OK; 3481 break; 3482 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3483 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL; 3484 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3485 break; 3486 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3487 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3488 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL; 3489 sc->sc_sensors[vol].status = SENSOR_S_CRIT; 3490 break; 3491 default: 3492 sc->sc_sensors[vol].value = 0; /* unknown */ 3493 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3494 } 3495 3496 /* override status if scrubbing or something */ 3497 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) { 3498 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD; 3499 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3500 } 3501 3502 vol++; 3503 } 3504 done: 3505 rw_exit_write(&sc->sc_lock); 3506 } 3507 #endif /* SMALL_KERNEL */ 3508 #endif /* NBIO > 0 */ 3509
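/*
 * Illustrative sketch only; this helper is not part of the driver.  It
 * shows how the polled config helpers above are meant to be used: fetch
 * the page header first, then read the page through that header.  It
 * mirrors mpi_cfg_coalescing(), re-reading IOC page 1 to report whether
 * reply coalescing is currently enabled.  The function name is made up
 * for the example; every other symbol it uses already exists in this
 * file or its headers.
 */
#ifdef notyet
int
mpi_coalescing_enabled(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_ioc_pg1		pg;

	/* ask the IOC for the IOC page 1 header (polled) */
	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0)
		return (-1);

	/* read the current page contents using that header */
	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
		return (-1);

	/* page flags are little endian on the wire */
	return (ISSET(letoh32(pg.flags),
	    MPI_CFG_IOC_1_REPLY_COALESCING) ? 1 : 0);
}
#endif /* notyet */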