1 /* $OpenBSD: mpi.c,v 1.183 2013/01/18 05:49:52 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org> 5 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org> 6 * 7 * Permission to use, copy, modify, and distribute this software for any 8 * purpose with or without fee is hereby granted, provided that the above 9 * copyright notice and this permission notice appear in all copies. 10 * 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 18 */ 19 20 #include "bio.h" 21 22 #include <sys/param.h> 23 #include <sys/systm.h> 24 #include <sys/buf.h> 25 #include <sys/device.h> 26 #include <sys/ioctl.h> 27 #include <sys/proc.h> 28 #include <sys/malloc.h> 29 #include <sys/kernel.h> 30 #include <sys/mutex.h> 31 #include <sys/rwlock.h> 32 #include <sys/sensors.h> 33 #include <sys/dkio.h> 34 35 #include <machine/bus.h> 36 37 #include <scsi/scsi_all.h> 38 #include <scsi/scsiconf.h> 39 40 #include <dev/biovar.h> 41 #include <dev/ic/mpireg.h> 42 #include <dev/ic/mpivar.h> 43 44 #ifdef MPI_DEBUG 45 uint32_t mpi_debug = 0 46 /* | MPI_D_CMD */ 47 /* | MPI_D_INTR */ 48 /* | MPI_D_MISC */ 49 /* | MPI_D_DMA */ 50 /* | MPI_D_IOCTL */ 51 /* | MPI_D_RW */ 52 /* | MPI_D_MEM */ 53 /* | MPI_D_CCB */ 54 /* | MPI_D_PPR */ 55 /* | MPI_D_RAID */ 56 /* | MPI_D_EVT */ 57 ; 58 #endif 59 60 struct cfdriver mpi_cd = { 61 NULL, 62 "mpi", 63 DV_DULL 64 }; 65 66 void mpi_scsi_cmd(struct scsi_xfer *); 67 void mpi_scsi_cmd_done(struct mpi_ccb *); 68 void mpi_minphys(struct buf *bp, struct scsi_link *sl); 69 int mpi_scsi_probe(struct scsi_link *); 70 int mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, 71 int); 72 73 struct scsi_adapter mpi_switch = { 74 mpi_scsi_cmd, 75 mpi_minphys, 76 mpi_scsi_probe, 77 NULL, 78 mpi_scsi_ioctl 79 }; 80 81 struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t); 82 void mpi_dmamem_free(struct mpi_softc *, 83 struct mpi_dmamem *); 84 int mpi_alloc_ccbs(struct mpi_softc *); 85 void *mpi_get_ccb(void *); 86 void mpi_put_ccb(void *, void *); 87 int mpi_alloc_replies(struct mpi_softc *); 88 void mpi_push_replies(struct mpi_softc *); 89 void mpi_push_reply(struct mpi_softc *, struct mpi_rcb *); 90 91 void mpi_start(struct mpi_softc *, struct mpi_ccb *); 92 int mpi_poll(struct mpi_softc *, struct mpi_ccb *, int); 93 void mpi_poll_done(struct mpi_ccb *); 94 void mpi_reply(struct mpi_softc *, u_int32_t); 95 96 void mpi_wait(struct mpi_softc *sc, struct mpi_ccb *); 97 void mpi_wait_done(struct mpi_ccb *); 98 99 int mpi_cfg_spi_port(struct mpi_softc *); 100 void mpi_squash_ppr(struct mpi_softc *); 101 void mpi_run_ppr(struct mpi_softc *); 102 int mpi_ppr(struct mpi_softc *, struct scsi_link *, 103 struct mpi_cfg_raid_physdisk *, int, int, int); 104 int mpi_inq(struct mpi_softc *, u_int16_t, int); 105 106 int mpi_cfg_sas(struct mpi_softc *); 107 int mpi_cfg_fc(struct mpi_softc *); 108 109 void mpi_timeout_xs(void *); 110 int mpi_load_xs(struct mpi_ccb *); 111 112 u_int32_t mpi_read(struct mpi_softc *, bus_size_t); 113 void mpi_write(struct mpi_softc *, bus_size_t, u_int32_t); 114 int 
mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t, 115 u_int32_t); 116 int mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t, 117 u_int32_t); 118 119 int mpi_init(struct mpi_softc *); 120 int mpi_reset_soft(struct mpi_softc *); 121 int mpi_reset_hard(struct mpi_softc *); 122 123 int mpi_handshake_send(struct mpi_softc *, void *, size_t); 124 int mpi_handshake_recv_dword(struct mpi_softc *, 125 u_int32_t *); 126 int mpi_handshake_recv(struct mpi_softc *, void *, size_t); 127 128 void mpi_empty_done(struct mpi_ccb *); 129 130 int mpi_iocinit(struct mpi_softc *); 131 int mpi_iocfacts(struct mpi_softc *); 132 int mpi_portfacts(struct mpi_softc *); 133 int mpi_portenable(struct mpi_softc *); 134 int mpi_cfg_coalescing(struct mpi_softc *); 135 void mpi_get_raid(struct mpi_softc *); 136 int mpi_fwupload(struct mpi_softc *); 137 int mpi_scsi_probe_virtual(struct scsi_link *); 138 139 int mpi_eventnotify(struct mpi_softc *); 140 void mpi_eventnotify_done(struct mpi_ccb *); 141 void mpi_eventnotify_free(struct mpi_softc *, 142 struct mpi_rcb *); 143 void mpi_eventack(void *, void *); 144 void mpi_eventack_done(struct mpi_ccb *); 145 int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *); 146 void mpi_evt_sas_detach(void *, void *); 147 void mpi_evt_sas_detach_done(struct mpi_ccb *); 148 void mpi_evt_fc_rescan(struct mpi_softc *); 149 void mpi_fc_rescan(void *, void *); 150 151 int mpi_req_cfg_header(struct mpi_softc *, u_int8_t, 152 u_int8_t, u_int32_t, int, void *); 153 int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int, 154 void *, int, void *, size_t); 155 156 int mpi_ioctl_cache(struct scsi_link *, u_long, 157 struct dk_cache *); 158 159 #if NBIO > 0 160 int mpi_bio_get_pg0_raid(struct mpi_softc *, int); 161 int mpi_ioctl(struct device *, u_long, caddr_t); 162 int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *); 163 int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *); 164 int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *); 165 int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *); 166 #ifndef SMALL_KERNEL 167 int mpi_create_sensors(struct mpi_softc *); 168 void mpi_refresh_sensors(void *); 169 #endif /* SMALL_KERNEL */ 170 #endif /* NBIO > 0 */ 171 172 #define DEVNAME(s) ((s)->sc_dev.dv_xname) 173 174 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t)) 175 176 #define mpi_read_db(s) mpi_read((s), MPI_DOORBELL) 177 #define mpi_write_db(s, v) mpi_write((s), MPI_DOORBELL, (v)) 178 #define mpi_read_intr(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 179 MPI_INTR_STATUS) 180 #define mpi_write_intr(s, v) mpi_write((s), MPI_INTR_STATUS, (v)) 181 #define mpi_pop_reply(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 182 MPI_REPLY_QUEUE) 183 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \ 184 MPI_REPLY_QUEUE, (v)) 185 186 #define mpi_wait_db_int(s) mpi_wait_ne((s), MPI_INTR_STATUS, \ 187 MPI_INTR_STATUS_DOORBELL, 0) 188 #define mpi_wait_db_ack(s) mpi_wait_eq((s), MPI_INTR_STATUS, \ 189 MPI_INTR_STATUS_IOCDOORBELL, 0) 190 191 #define MPI_PG_EXTENDED (1<<0) 192 #define MPI_PG_POLL (1<<1) 193 #define MPI_PG_FMT "\020" "\002POLL" "\001EXTENDED" 194 195 #define mpi_cfg_header(_s, _t, _n, _a, _h) \ 196 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 197 MPI_PG_POLL, (_h)) 198 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \ 199 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 200 MPI_PG_POLL|MPI_PG_EXTENDED, (_h)) 201 202 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \ 203 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \ 204 (_h), (_r), (_p), (_l)) 205 
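/*
 * All of these wrappers issue polled configuration transactions
 * (MPI_PG_POLL).  The mpi_ecfg_* variants also set MPI_PG_EXTENDED to
 * request extended page headers, which the SAS configuration pages use.
 */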
#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \ 206 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \ 207 (_h), (_r), (_p), (_l)) 208 209 int 210 mpi_attach(struct mpi_softc *sc) 211 { 212 struct scsibus_attach_args saa; 213 struct mpi_ccb *ccb; 214 215 printf("\n"); 216 217 rw_init(&sc->sc_lock, "mpi_lock"); 218 mtx_init(&sc->sc_evt_rescan_mtx, IPL_BIO); 219 220 /* disable interrupts */ 221 mpi_write(sc, MPI_INTR_MASK, 222 MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL); 223 224 if (mpi_init(sc) != 0) { 225 printf("%s: unable to initialise\n", DEVNAME(sc)); 226 return (1); 227 } 228 229 if (mpi_iocfacts(sc) != 0) { 230 printf("%s: unable to get iocfacts\n", DEVNAME(sc)); 231 return (1); 232 } 233 234 if (mpi_alloc_ccbs(sc) != 0) { 235 /* error already printed */ 236 return (1); 237 } 238 239 if (mpi_alloc_replies(sc) != 0) { 240 printf("%s: unable to allocate reply space\n", DEVNAME(sc)); 241 goto free_ccbs; 242 } 243 244 if (mpi_iocinit(sc) != 0) { 245 printf("%s: unable to send iocinit\n", DEVNAME(sc)); 246 goto free_ccbs; 247 } 248 249 /* spin until we're operational */ 250 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 251 MPI_DOORBELL_STATE_OPER) != 0) { 252 printf("%s: state: 0x%08x\n", DEVNAME(sc), 253 mpi_read_db(sc) & MPI_DOORBELL_STATE); 254 printf("%s: operational state timeout\n", DEVNAME(sc)); 255 goto free_ccbs; 256 } 257 258 mpi_push_replies(sc); 259 260 if (mpi_portfacts(sc) != 0) { 261 printf("%s: unable to get portfacts\n", DEVNAME(sc)); 262 goto free_replies; 263 } 264 265 if (mpi_cfg_coalescing(sc) != 0) { 266 printf("%s: unable to configure coalescing\n", DEVNAME(sc)); 267 goto free_replies; 268 } 269 270 switch (sc->sc_porttype) { 271 case MPI_PORTFACTS_PORTTYPE_SAS: 272 SIMPLEQ_INIT(&sc->sc_evt_scan_queue); 273 mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO); 274 scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool, 275 mpi_evt_sas_detach, sc); 276 /* FALLTHROUGH */ 277 case MPI_PORTFACTS_PORTTYPE_FC: 278 if (mpi_eventnotify(sc) != 0) { 279 printf("%s: unable to enable events\n", DEVNAME(sc)); 280 goto free_replies; 281 } 282 break; 283 } 284 285 if (mpi_portenable(sc) != 0) { 286 printf("%s: unable to enable port\n", DEVNAME(sc)); 287 goto free_replies; 288 } 289 290 if (mpi_fwupload(sc) != 0) { 291 printf("%s: unable to upload firmware\n", DEVNAME(sc)); 292 goto free_replies; 293 } 294 295 switch (sc->sc_porttype) { 296 case MPI_PORTFACTS_PORTTYPE_SCSI: 297 if (mpi_cfg_spi_port(sc) != 0) { 298 printf("%s: unable to configure spi\n", DEVNAME(sc)); 299 goto free_replies; 300 } 301 mpi_squash_ppr(sc); 302 break; 303 case MPI_PORTFACTS_PORTTYPE_SAS: 304 if (mpi_cfg_sas(sc) != 0) { 305 printf("%s: unable to configure sas\n", DEVNAME(sc)); 306 goto free_replies; 307 } 308 break; 309 case MPI_PORTFACTS_PORTTYPE_FC: 310 if (mpi_cfg_fc(sc) != 0) { 311 printf("%s: unable to configure fc\n", DEVNAME(sc)); 312 goto free_replies; 313 } 314 break; 315 } 316 317 /* get raid pages */ 318 mpi_get_raid(sc); 319 #if NBIO > 0 320 if (sc->sc_flags & MPI_F_RAID) { 321 if (bio_register(&sc->sc_dev, mpi_ioctl) != 0) 322 panic("%s: controller registration failed", 323 DEVNAME(sc)); 324 else { 325 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 326 2, 0, &sc->sc_cfg_hdr) != 0) { 327 panic("%s: can't get IOC page 2 hdr", 328 DEVNAME(sc)); 329 } 330 331 sc->sc_vol_page = malloc(sc->sc_cfg_hdr.page_length * 4, 332 M_TEMP, M_WAITOK | M_CANFAIL); 333 if (sc->sc_vol_page == NULL) { 334 panic("%s: can't get memory for IOC page 2, " 335 "bio disabled", DEVNAME(sc)); 336 } 337 338 if 
    (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
                    sc->sc_vol_page,
                    sc->sc_cfg_hdr.page_length * 4) != 0) {
                panic("%s: can't get IOC page 2", DEVNAME(sc));
            }

            sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
                (sc->sc_vol_page + 1);

            sc->sc_ioctl = mpi_ioctl;
        }
    }
#endif /* NBIO > 0 */

    /* we should be good to go now, attach scsibus */
    sc->sc_link.adapter = &mpi_switch;
    sc->sc_link.adapter_softc = sc;
    sc->sc_link.adapter_target = sc->sc_target;
    sc->sc_link.adapter_buswidth = sc->sc_buswidth;
    sc->sc_link.openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
    sc->sc_link.pool = &sc->sc_iopool;

    bzero(&saa, sizeof(saa));
    saa.saa_sc_link = &sc->sc_link;

    /* config_found() returns the scsibus attached to us */
    sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
        &saa, scsiprint);

    /* do domain validation */
    if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
        mpi_run_ppr(sc);

    /* enable interrupts */
    mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

#if NBIO > 0
#ifndef SMALL_KERNEL
    mpi_create_sensors(sc);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

    return (0);

free_replies:
    bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
        sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
    mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
    while ((ccb = mpi_get_ccb(sc)) != NULL)
        bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
    mpi_dmamem_free(sc, sc->sc_requests);
    free(sc->sc_ccbs, M_DEVBUF);

    return (1);
}

int
mpi_cfg_spi_port(struct mpi_softc *sc)
{
    struct mpi_cfg_hdr hdr;
    struct mpi_cfg_spi_port_pg1 port;

    if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
        &hdr) != 0)
        return (1);

    if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
        return (1);

    DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
    DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
        DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
    DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
        letoh32(port.on_bus_timer_value));
    DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
        DEVNAME(sc), port.target_config, letoh16(port.id_config));

    if (port.port_scsi_id == sc->sc_target &&
        port.port_resp_ids == htole16(1 << sc->sc_target) &&
        port.on_bus_timer_value != htole32(0x0))
        return (0);

    DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
        sc->sc_target);
    port.port_scsi_id = sc->sc_target;
    port.port_resp_ids = htole16(1 << sc->sc_target);
    port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */

    if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
        printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
        return (1);
    }

    return (0);
}

void
mpi_squash_ppr(struct mpi_softc *sc)
{
    struct mpi_cfg_hdr hdr;
    struct mpi_cfg_spi_dev_pg1 page;
    int i;

    DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

    for (i = 0; i < sc->sc_buswidth; i++) {
        if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
            1, i, &hdr) != 0)
            return;

        if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
            return;

        DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
            "req_offset: 0x%02x 
req_period: 0x%02x " 454 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i, 455 page.req_params1, page.req_offset, page.req_period, 456 page.req_params2, letoh32(page.configuration)); 457 458 page.req_params1 = 0x0; 459 page.req_offset = 0x0; 460 page.req_period = 0x0; 461 page.req_params2 = 0x0; 462 page.configuration = htole32(0x0); 463 464 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0) 465 return; 466 } 467 } 468 469 void 470 mpi_run_ppr(struct mpi_softc *sc) 471 { 472 struct mpi_cfg_hdr hdr; 473 struct mpi_cfg_spi_port_pg0 port_pg; 474 struct mpi_cfg_ioc_pg3 *physdisk_pg; 475 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk; 476 size_t pagelen; 477 struct scsi_link *link; 478 int i, tries; 479 480 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0, 481 &hdr) != 0) { 482 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n", 483 DEVNAME(sc)); 484 return; 485 } 486 487 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) { 488 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n", 489 DEVNAME(sc)); 490 return; 491 } 492 493 for (i = 0; i < sc->sc_buswidth; i++) { 494 link = scsi_get_link(sc->sc_scsibus, i, 0); 495 if (link == NULL) 496 continue; 497 498 /* do not ppr volumes */ 499 if (link->flags & SDEV_VIRTUAL) 500 continue; 501 502 tries = 0; 503 while (mpi_ppr(sc, link, NULL, port_pg.min_period, 504 port_pg.max_offset, tries) == EAGAIN) 505 tries++; 506 } 507 508 if ((sc->sc_flags & MPI_F_RAID) == 0) 509 return; 510 511 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0, 512 &hdr) != 0) { 513 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 514 "fetch ioc pg 3 header\n", DEVNAME(sc)); 515 return; 516 } 517 518 pagelen = hdr.page_length * 4; /* dwords to bytes */ 519 physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 520 if (physdisk_pg == NULL) { 521 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 522 "allocate ioc pg 3\n", DEVNAME(sc)); 523 return; 524 } 525 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1); 526 527 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) { 528 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to " 529 "fetch ioc page 3\n", DEVNAME(sc)); 530 goto out; 531 } 532 533 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc), 534 physdisk_pg->no_phys_disks); 535 536 for (i = 0; i < physdisk_pg->no_phys_disks; i++) { 537 physdisk = &physdisk_list[i]; 538 539 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d " 540 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id, 541 physdisk->phys_disk_bus, physdisk->phys_disk_ioc, 542 physdisk->phys_disk_num); 543 544 if (physdisk->phys_disk_ioc != sc->sc_ioc_number) 545 continue; 546 547 tries = 0; 548 while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period, 549 port_pg.max_offset, tries) == EAGAIN) 550 tries++; 551 } 552 553 out: 554 free(physdisk_pg, M_TEMP); 555 } 556 557 int 558 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link, 559 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try) 560 { 561 struct mpi_cfg_hdr hdr0, hdr1; 562 struct mpi_cfg_spi_dev_pg0 pg0; 563 struct mpi_cfg_spi_dev_pg1 pg1; 564 u_int32_t address; 565 int id; 566 int raid = 0; 567 568 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d " 569 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try, 570 link->quirks); 571 572 if (try >= 3) 573 return (EIO); 574 575 if (physdisk == NULL) { 576 if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR) 577 return 
(EIO); 578 579 address = link->target; 580 id = link->target; 581 } else { 582 raid = 1; 583 address = (physdisk->phys_disk_bus << 8) | 584 (physdisk->phys_disk_id); 585 id = physdisk->phys_disk_num; 586 } 587 588 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0, 589 address, &hdr0) != 0) { 590 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n", 591 DEVNAME(sc)); 592 return (EIO); 593 } 594 595 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1, 596 address, &hdr1) != 0) { 597 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n", 598 DEVNAME(sc)); 599 return (EIO); 600 } 601 602 #ifdef MPI_DEBUG 603 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 604 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n", 605 DEVNAME(sc)); 606 return (EIO); 607 } 608 609 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 610 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 611 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset, 612 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 613 #endif 614 615 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 616 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n", 617 DEVNAME(sc)); 618 return (EIO); 619 } 620 621 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 622 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 623 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 624 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 625 626 pg1.req_params1 = 0; 627 pg1.req_offset = offset; 628 pg1.req_period = period; 629 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH; 630 631 if (raid || !(link->quirks & SDEV_NOSYNC)) { 632 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE; 633 634 switch (try) { 635 case 0: /* U320 */ 636 break; 637 case 1: /* U160 */ 638 pg1.req_period = 0x09; 639 break; 640 case 2: /* U80 */ 641 pg1.req_period = 0x0a; 642 break; 643 } 644 645 if (pg1.req_period < 0x09) { 646 /* Ultra320: enable QAS & PACKETIZED */ 647 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS | 648 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED; 649 } 650 if (pg1.req_period < 0xa) { 651 /* >= Ultra160: enable dual xfers */ 652 pg1.req_params1 |= 653 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS; 654 } 655 } 656 657 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 658 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 659 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 660 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 661 662 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) { 663 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n", 664 DEVNAME(sc)); 665 return (EIO); 666 } 667 668 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 669 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n", 670 DEVNAME(sc)); 671 return (EIO); 672 } 673 674 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 675 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 676 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 677 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 678 679 if (mpi_inq(sc, id, raid) != 0) { 680 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against " 681 "target %d\n", DEVNAME(sc), link->target); 682 return (EIO); 683 } 684 685 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 686 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 
0 after " 687 "inquiry\n", DEVNAME(sc)); 688 return (EIO); 689 } 690 691 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 692 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 693 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset, 694 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 695 696 if (!(letoh32(pg0.information) & 0x07) && (try == 0)) { 697 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n", 698 DEVNAME(sc)); 699 return (EAGAIN); 700 } 701 702 if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) { 703 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n", 704 DEVNAME(sc)); 705 return (EAGAIN); 706 } 707 708 if (letoh32(pg0.information) & 0x0e) { 709 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n", 710 DEVNAME(sc), letoh32(pg0.information)); 711 return (EAGAIN); 712 } 713 714 switch(pg0.neg_period) { 715 case 0x08: 716 period = 160; 717 break; 718 case 0x09: 719 period = 80; 720 break; 721 case 0x0a: 722 period = 40; 723 break; 724 case 0x0b: 725 period = 20; 726 break; 727 case 0x0c: 728 period = 10; 729 break; 730 default: 731 period = 0; 732 break; 733 } 734 735 printf("%s: %s %d %s at %dMHz width %dbit offset %d " 736 "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target", 737 id, period ? "Sync" : "Async", period, 738 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8, 739 pg0.neg_offset, 740 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0, 741 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0, 742 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0); 743 744 return (0); 745 } 746 747 int 748 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk) 749 { 750 struct mpi_ccb *ccb; 751 struct scsi_inquiry inq; 752 struct { 753 struct mpi_msg_scsi_io io; 754 struct mpi_sge sge; 755 struct scsi_inquiry_data inqbuf; 756 struct scsi_sense_data sense; 757 } __packed *bundle; 758 struct mpi_msg_scsi_io *io; 759 struct mpi_sge *sge; 760 u_int64_t addr; 761 762 DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc)); 763 764 bzero(&inq, sizeof(inq)); 765 inq.opcode = INQUIRY; 766 _lto2b(sizeof(struct scsi_inquiry_data), inq.length); 767 768 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 769 if (ccb == NULL) 770 return (1); 771 772 ccb->ccb_done = mpi_empty_done; 773 774 bundle = ccb->ccb_cmd; 775 io = &bundle->io; 776 sge = &bundle->sge; 777 778 io->function = physdisk ? 
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH : 779 MPI_FUNCTION_SCSI_IO_REQUEST; 780 /* 781 * bus is always 0 782 * io->bus = htole16(sc->sc_bus); 783 */ 784 io->target_id = target; 785 786 io->cdb_length = sizeof(inq); 787 io->sense_buf_len = sizeof(struct scsi_sense_data); 788 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 789 790 io->msg_context = htole32(ccb->ccb_id); 791 792 /* 793 * always lun 0 794 * io->lun[0] = htobe16(link->lun); 795 */ 796 797 io->direction = MPI_SCSIIO_DIR_READ; 798 io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT; 799 800 bcopy(&inq, io->cdb, sizeof(inq)); 801 802 io->data_length = htole32(sizeof(struct scsi_inquiry_data)); 803 804 io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva + 805 ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle)); 806 807 sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 | 808 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 809 (u_int32_t)sizeof(inq)); 810 811 addr = ccb->ccb_cmd_dva + 812 ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle); 813 sge->sg_addr = htole64(addr); 814 815 if (mpi_poll(sc, ccb, 5000) != 0) 816 return (1); 817 818 if (ccb->ccb_rcb != NULL) 819 mpi_push_reply(sc, ccb->ccb_rcb); 820 821 scsi_io_put(&sc->sc_iopool, ccb); 822 823 return (0); 824 } 825 826 int 827 mpi_cfg_sas(struct mpi_softc *sc) 828 { 829 struct mpi_ecfg_hdr ehdr; 830 struct mpi_cfg_sas_iou_pg1 *pg; 831 size_t pagelen; 832 int rv = 0; 833 834 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0, 835 &ehdr) != 0) 836 return (0); 837 838 pagelen = letoh16(ehdr.ext_page_length) * 4; 839 pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO); 840 if (pg == NULL) 841 return (ENOMEM); 842 843 if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0) 844 goto out; 845 846 if (pg->max_sata_q_depth != 32) { 847 pg->max_sata_q_depth = 32; 848 849 if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0) 850 goto out; 851 } 852 853 out: 854 free(pg, M_TEMP); 855 return (rv); 856 } 857 858 int 859 mpi_cfg_fc(struct mpi_softc *sc) 860 { 861 struct mpi_cfg_hdr hdr; 862 struct mpi_cfg_fc_port_pg0 pg0; 863 struct mpi_cfg_fc_port_pg1 pg1; 864 865 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0, 866 &hdr) != 0) { 867 printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc)); 868 return (1); 869 } 870 871 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) { 872 printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc)); 873 return (1); 874 } 875 876 sc->sc_link.port_wwn = letoh64(pg0.wwpn); 877 sc->sc_link.node_wwn = letoh64(pg0.wwnn); 878 879 /* configure port config more to our liking */ 880 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0, 881 &hdr) != 0) { 882 printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc)); 883 return (1); 884 } 885 886 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) { 887 printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc)); 888 return (1); 889 } 890 891 SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR | 892 MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN)); 893 894 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) { 895 printf("%s: unable to set FC port page 1\n", DEVNAME(sc)); 896 return (1); 897 } 898 899 return (0); 900 } 901 902 void 903 mpi_detach(struct mpi_softc *sc) 904 { 905 906 } 907 908 int 909 mpi_intr(void *arg) 910 { 911 struct mpi_softc *sc = arg; 912 u_int32_t reg; 913 int rv = 0; 914 915 if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0) 916 return (rv); 917 918 while ((reg = mpi_pop_reply(sc)) != 0xffffffff) { 919 
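        /*
         * A value popped off the reply queue is either the DMA address
         * of a reply frame (stored shifted right by one bit) or an
         * immediate context value; mpi_reply() tells them apart.
         * 0xffffffff means the queue is empty.
         */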
mpi_reply(sc, reg); 920 rv = 1; 921 } 922 923 return (rv); 924 } 925 926 void 927 mpi_reply(struct mpi_softc *sc, u_int32_t reg) 928 { 929 struct mpi_ccb *ccb; 930 struct mpi_rcb *rcb = NULL; 931 struct mpi_msg_reply *reply = NULL; 932 u_int32_t reply_dva; 933 int id; 934 int i; 935 936 DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg); 937 938 if (reg & MPI_REPLY_QUEUE_ADDRESS) { 939 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1; 940 i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) / 941 MPI_REPLY_SIZE; 942 rcb = &sc->sc_rcbs[i]; 943 944 bus_dmamap_sync(sc->sc_dmat, 945 MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset, 946 MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD); 947 948 reply = rcb->rcb_reply; 949 950 id = letoh32(reply->msg_context); 951 } else { 952 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) { 953 case MPI_REPLY_QUEUE_TYPE_INIT: 954 id = reg & MPI_REPLY_QUEUE_CONTEXT; 955 break; 956 957 default: 958 panic("%s: unsupported context reply", 959 DEVNAME(sc)); 960 } 961 } 962 963 DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n", 964 DEVNAME(sc), id, reply); 965 966 ccb = &sc->sc_ccbs[id]; 967 968 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 969 ccb->ccb_offset, MPI_REQUEST_SIZE, 970 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 971 ccb->ccb_state = MPI_CCB_READY; 972 ccb->ccb_rcb = rcb; 973 974 ccb->ccb_done(ccb); 975 } 976 977 struct mpi_dmamem * 978 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size) 979 { 980 struct mpi_dmamem *mdm; 981 int nsegs; 982 983 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO); 984 if (mdm == NULL) 985 return (NULL); 986 987 mdm->mdm_size = size; 988 989 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 990 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0) 991 goto mdmfree; 992 993 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg, 994 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0) 995 goto destroy; 996 997 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size, 998 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0) 999 goto free; 1000 1001 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size, 1002 NULL, BUS_DMA_NOWAIT) != 0) 1003 goto unmap; 1004 1005 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x " 1006 "map: %#x nsegs: %d segs: %#x kva: %x\n", 1007 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva); 1008 1009 return (mdm); 1010 1011 unmap: 1012 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size); 1013 free: 1014 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 1015 destroy: 1016 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 1017 mdmfree: 1018 free(mdm, M_DEVBUF); 1019 1020 return (NULL); 1021 } 1022 1023 void 1024 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm) 1025 { 1026 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm); 1027 1028 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map); 1029 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size); 1030 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 1031 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 1032 free(mdm, M_DEVBUF); 1033 } 1034 1035 int 1036 mpi_alloc_ccbs(struct mpi_softc *sc) 1037 { 1038 struct mpi_ccb *ccb; 1039 u_int8_t *cmd; 1040 int i; 1041 1042 SLIST_INIT(&sc->sc_ccb_free); 1043 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 1044 1045 sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds, 1046 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO); 1047 if (sc->sc_ccbs == NULL) { 1048 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 1049 return (1); 
1050 } 1051 1052 sc->sc_requests = mpi_dmamem_alloc(sc, 1053 MPI_REQUEST_SIZE * sc->sc_maxcmds); 1054 if (sc->sc_requests == NULL) { 1055 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)); 1056 goto free_ccbs; 1057 } 1058 cmd = MPI_DMA_KVA(sc->sc_requests); 1059 bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds); 1060 1061 for (i = 0; i < sc->sc_maxcmds; i++) { 1062 ccb = &sc->sc_ccbs[i]; 1063 1064 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1065 sc->sc_max_sgl_len, MAXPHYS, 0, 1066 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1067 &ccb->ccb_dmamap) != 0) { 1068 printf("%s: unable to create dma map\n", DEVNAME(sc)); 1069 goto free_maps; 1070 } 1071 1072 ccb->ccb_sc = sc; 1073 ccb->ccb_id = i; 1074 ccb->ccb_offset = MPI_REQUEST_SIZE * i; 1075 ccb->ccb_state = MPI_CCB_READY; 1076 1077 ccb->ccb_cmd = &cmd[ccb->ccb_offset]; 1078 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) + 1079 ccb->ccb_offset; 1080 1081 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x " 1082 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n", 1083 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc, 1084 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd, 1085 ccb->ccb_cmd_dva); 1086 1087 mpi_put_ccb(sc, ccb); 1088 } 1089 1090 scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb); 1091 1092 return (0); 1093 1094 free_maps: 1095 while ((ccb = mpi_get_ccb(sc)) != NULL) 1096 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 1097 1098 mpi_dmamem_free(sc, sc->sc_requests); 1099 free_ccbs: 1100 free(sc->sc_ccbs, M_DEVBUF); 1101 1102 return (1); 1103 } 1104 1105 void * 1106 mpi_get_ccb(void *xsc) 1107 { 1108 struct mpi_softc *sc = xsc; 1109 struct mpi_ccb *ccb; 1110 1111 mtx_enter(&sc->sc_ccb_mtx); 1112 ccb = SLIST_FIRST(&sc->sc_ccb_free); 1113 if (ccb != NULL) { 1114 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 1115 ccb->ccb_state = MPI_CCB_READY; 1116 } 1117 mtx_leave(&sc->sc_ccb_mtx); 1118 1119 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb); 1120 1121 return (ccb); 1122 } 1123 1124 void 1125 mpi_put_ccb(void *xsc, void *io) 1126 { 1127 struct mpi_softc *sc = xsc; 1128 struct mpi_ccb *ccb = io; 1129 1130 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb); 1131 1132 #ifdef DIAGNOSTIC 1133 if (ccb->ccb_state == MPI_CCB_FREE) 1134 panic("mpi_put_ccb: double free"); 1135 #endif 1136 1137 ccb->ccb_state = MPI_CCB_FREE; 1138 ccb->ccb_cookie = NULL; 1139 ccb->ccb_done = NULL; 1140 bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE); 1141 mtx_enter(&sc->sc_ccb_mtx); 1142 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 1143 mtx_leave(&sc->sc_ccb_mtx); 1144 } 1145 1146 int 1147 mpi_alloc_replies(struct mpi_softc *sc) 1148 { 1149 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc)); 1150 1151 sc->sc_rcbs = malloc(sc->sc_repq * sizeof(struct mpi_rcb), M_DEVBUF, 1152 M_WAITOK|M_CANFAIL); 1153 if (sc->sc_rcbs == NULL) 1154 return (1); 1155 1156 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE); 1157 if (sc->sc_replies == NULL) { 1158 free(sc->sc_rcbs, M_DEVBUF); 1159 return (1); 1160 } 1161 1162 return (0); 1163 } 1164 1165 void 1166 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb) 1167 { 1168 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 1169 rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1170 mpi_push_reply_db(sc, rcb->rcb_reply_dva); 1171 } 1172 1173 void 1174 mpi_push_replies(struct mpi_softc *sc) 1175 { 1176 struct mpi_rcb *rcb; 1177 char *kva = MPI_DMA_KVA(sc->sc_replies); 1178 int i; 1179 1180 bus_dmamap_sync(sc->sc_dmat, 
MPI_DMA_MAP(sc->sc_replies), 0, 1181 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1182 1183 for (i = 0; i < sc->sc_repq; i++) { 1184 rcb = &sc->sc_rcbs[i]; 1185 1186 rcb->rcb_reply = kva + MPI_REPLY_SIZE * i; 1187 rcb->rcb_offset = MPI_REPLY_SIZE * i; 1188 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) + 1189 MPI_REPLY_SIZE * i; 1190 mpi_push_reply_db(sc, rcb->rcb_reply_dva); 1191 } 1192 } 1193 1194 void 1195 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb) 1196 { 1197 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc), 1198 ccb->ccb_cmd_dva); 1199 1200 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 1201 ccb->ccb_offset, MPI_REQUEST_SIZE, 1202 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1203 1204 ccb->ccb_state = MPI_CCB_QUEUED; 1205 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 1206 MPI_REQ_QUEUE, ccb->ccb_cmd_dva); 1207 } 1208 1209 int 1210 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout) 1211 { 1212 void (*done)(struct mpi_ccb *); 1213 void *cookie; 1214 int rv = 1; 1215 u_int32_t reg; 1216 1217 DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc), 1218 timeout); 1219 1220 done = ccb->ccb_done; 1221 cookie = ccb->ccb_cookie; 1222 1223 ccb->ccb_done = mpi_poll_done; 1224 ccb->ccb_cookie = &rv; 1225 1226 mpi_start(sc, ccb); 1227 while (rv == 1) { 1228 reg = mpi_pop_reply(sc); 1229 if (reg == 0xffffffff) { 1230 if (timeout-- == 0) { 1231 printf("%s: timeout\n", DEVNAME(sc)); 1232 goto timeout; 1233 } 1234 1235 delay(1000); 1236 continue; 1237 } 1238 1239 mpi_reply(sc, reg); 1240 } 1241 1242 ccb->ccb_cookie = cookie; 1243 done(ccb); 1244 1245 timeout: 1246 return (rv); 1247 } 1248 1249 void 1250 mpi_poll_done(struct mpi_ccb *ccb) 1251 { 1252 int *rv = ccb->ccb_cookie; 1253 1254 *rv = 0; 1255 } 1256 1257 void 1258 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb) 1259 { 1260 struct mutex cookie = MUTEX_INITIALIZER(IPL_BIO); 1261 void (*done)(struct mpi_ccb *); 1262 1263 done = ccb->ccb_done; 1264 ccb->ccb_done = mpi_wait_done; 1265 ccb->ccb_cookie = &cookie; 1266 1267 /* XXX this will wait forever for the ccb to complete */ 1268 1269 mpi_start(sc, ccb); 1270 1271 mtx_enter(&cookie); 1272 while (ccb->ccb_cookie != NULL) 1273 msleep(ccb, &cookie, PRIBIO, "mpiwait", 0); 1274 mtx_leave(&cookie); 1275 1276 done(ccb); 1277 } 1278 1279 void 1280 mpi_wait_done(struct mpi_ccb *ccb) 1281 { 1282 struct mutex *cookie = ccb->ccb_cookie; 1283 1284 mtx_enter(cookie); 1285 ccb->ccb_cookie = NULL; 1286 wakeup_one(ccb); 1287 mtx_leave(cookie); 1288 } 1289 1290 void 1291 mpi_scsi_cmd(struct scsi_xfer *xs) 1292 { 1293 struct scsi_link *link = xs->sc_link; 1294 struct mpi_softc *sc = link->adapter_softc; 1295 struct mpi_ccb *ccb; 1296 struct mpi_ccb_bundle *mcb; 1297 struct mpi_msg_scsi_io *io; 1298 1299 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc)); 1300 1301 if (xs->cmdlen > MPI_CDB_LEN) { 1302 DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n", 1303 DEVNAME(sc), xs->cmdlen); 1304 bzero(&xs->sense, sizeof(xs->sense)); 1305 xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT; 1306 xs->sense.flags = SKEY_ILLEGAL_REQUEST; 1307 xs->sense.add_sense_code = 0x20; 1308 xs->error = XS_SENSE; 1309 scsi_done(xs); 1310 return; 1311 } 1312 1313 ccb = xs->io; 1314 1315 DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n", 1316 DEVNAME(sc), ccb->ccb_id, xs->flags); 1317 1318 ccb->ccb_cookie = xs; 1319 ccb->ccb_done = mpi_scsi_cmd_done; 1320 1321 mcb = ccb->ccb_cmd; 1322 io = &mcb->mcb_io; 1323 1324 io->function = MPI_FUNCTION_SCSI_IO_REQUEST; 1325 
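    /*
     * The message, its SGL and the sense buffer share one request
     * frame (struct mpi_ccb_bundle), so the sense address and the
     * chain elements set up below are offsets from ccb_cmd_dva.
     */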
/* 1326 * bus is always 0 1327 * io->bus = htole16(sc->sc_bus); 1328 */ 1329 io->target_id = link->target; 1330 1331 io->cdb_length = xs->cmdlen; 1332 io->sense_buf_len = sizeof(xs->sense); 1333 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 1334 1335 io->msg_context = htole32(ccb->ccb_id); 1336 1337 io->lun[0] = htobe16(link->lun); 1338 1339 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1340 case SCSI_DATA_IN: 1341 io->direction = MPI_SCSIIO_DIR_READ; 1342 break; 1343 case SCSI_DATA_OUT: 1344 io->direction = MPI_SCSIIO_DIR_WRITE; 1345 break; 1346 default: 1347 io->direction = MPI_SCSIIO_DIR_NONE; 1348 break; 1349 } 1350 1351 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI && 1352 (link->quirks & SDEV_NOTAGS)) 1353 io->tagging = MPI_SCSIIO_ATTR_UNTAGGED; 1354 else 1355 io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q; 1356 1357 bcopy(xs->cmd, io->cdb, xs->cmdlen); 1358 1359 io->data_length = htole32(xs->datalen); 1360 1361 io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva + 1362 ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb)); 1363 1364 if (mpi_load_xs(ccb) != 0) { 1365 xs->error = XS_DRIVER_STUFFUP; 1366 scsi_done(xs); 1367 return; 1368 } 1369 1370 timeout_set(&xs->stimeout, mpi_timeout_xs, ccb); 1371 1372 if (xs->flags & SCSI_POLL) { 1373 if (mpi_poll(sc, ccb, xs->timeout) != 0) { 1374 xs->error = XS_DRIVER_STUFFUP; 1375 scsi_done(xs); 1376 } 1377 return; 1378 } 1379 1380 mpi_start(sc, ccb); 1381 } 1382 1383 void 1384 mpi_scsi_cmd_done(struct mpi_ccb *ccb) 1385 { 1386 struct mpi_softc *sc = ccb->ccb_sc; 1387 struct scsi_xfer *xs = ccb->ccb_cookie; 1388 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1389 bus_dmamap_t dmap = ccb->ccb_dmamap; 1390 struct mpi_msg_scsi_io_error *sie; 1391 1392 if (xs->datalen != 0) { 1393 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1394 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_POSTREAD : 1395 BUS_DMASYNC_POSTWRITE); 1396 1397 bus_dmamap_unload(sc->sc_dmat, dmap); 1398 } 1399 1400 /* timeout_del */ 1401 xs->error = XS_NOERROR; 1402 xs->resid = 0; 1403 1404 if (ccb->ccb_rcb == NULL) { 1405 /* no scsi error, we're ok so drop out early */ 1406 xs->status = SCSI_OK; 1407 scsi_done(xs); 1408 return; 1409 } 1410 1411 sie = ccb->ccb_rcb->rcb_reply; 1412 1413 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d " 1414 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen, 1415 xs->flags); 1416 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d " 1417 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus, 1418 sie->msg_length, sie->function); 1419 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d " 1420 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length, 1421 sie->sense_buf_len, sie->msg_flags); 1422 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1423 letoh32(sie->msg_context)); 1424 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x " 1425 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status, 1426 sie->scsi_state, letoh16(sie->ioc_status)); 1427 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1428 letoh32(sie->ioc_loginfo)); 1429 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc), 1430 letoh32(sie->transfer_count)); 1431 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc), 1432 letoh32(sie->sense_count)); 1433 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc), 1434 letoh32(sie->response_info)); 1435 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc), 1436 letoh16(sie->tag)); 1437 1438 xs->status = sie->scsi_status; 1439 switch (letoh16(sie->ioc_status)) { 1440 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 1441 xs->resid = xs->datalen - letoh32(sie->transfer_count); 1442 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) { 1443 xs->error = XS_DRIVER_STUFFUP; 1444 break; 1445 } 1446 /* FALLTHROUGH */ 1447 case MPI_IOCSTATUS_SUCCESS: 1448 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 1449 switch (xs->status) { 1450 case SCSI_OK: 1451 xs->resid = 0; 1452 break; 1453 1454 case SCSI_CHECK: 1455 xs->error = XS_SENSE; 1456 break; 1457 1458 case SCSI_BUSY: 1459 case SCSI_QUEUE_FULL: 1460 xs->error = XS_BUSY; 1461 break; 1462 1463 default: 1464 xs->error = XS_DRIVER_STUFFUP; 1465 break; 1466 } 1467 break; 1468 1469 case MPI_IOCSTATUS_BUSY: 1470 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 1471 xs->error = XS_BUSY; 1472 break; 1473 1474 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 1475 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 1476 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 1477 xs->error = XS_SELTIMEOUT; 1478 break; 1479 1480 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 1481 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 1482 xs->error = XS_RESET; 1483 break; 1484 1485 default: 1486 xs->error = XS_DRIVER_STUFFUP; 1487 break; 1488 } 1489 1490 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID) 1491 bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense)); 1492 1493 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc), 1494 xs->error, xs->status); 1495 1496 mpi_push_reply(sc, ccb->ccb_rcb); 1497 scsi_done(xs); 1498 } 1499 1500 void 1501 mpi_timeout_xs(void *arg) 1502 { 1503 /* XXX */ 1504 } 1505 1506 int 1507 mpi_load_xs(struct mpi_ccb *ccb) 1508 { 1509 struct mpi_softc *sc = ccb->ccb_sc; 1510 struct scsi_xfer *xs = ccb->ccb_cookie; 1511 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1512 struct mpi_msg_scsi_io *io = &mcb->mcb_io; 1513 struct mpi_sge *sge, *nsge = 
&mcb->mcb_sgl[0]; 1514 struct mpi_sge *ce = NULL, *nce; 1515 u_int64_t ce_dva; 1516 bus_dmamap_t dmap = ccb->ccb_dmamap; 1517 u_int32_t addr, flags; 1518 int i, error; 1519 1520 if (xs->datalen == 0) { 1521 nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 1522 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 1523 return (0); 1524 } 1525 1526 error = bus_dmamap_load(sc->sc_dmat, dmap, 1527 xs->data, xs->datalen, NULL, BUS_DMA_STREAMING | 1528 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 1529 if (error) { 1530 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 1531 return (1); 1532 } 1533 1534 flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64; 1535 if (xs->flags & SCSI_DATA_OUT) 1536 flags |= MPI_SGE_FL_DIR_OUT; 1537 1538 if (dmap->dm_nsegs > sc->sc_first_sgl_len) { 1539 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1]; 1540 io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4; 1541 } 1542 1543 for (i = 0; i < dmap->dm_nsegs; i++) { 1544 1545 if (nsge == ce) { 1546 nsge++; 1547 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST); 1548 1549 DNPRINTF(MPI_D_DMA, "%s: - 0x%08x 0x%016llx\n", 1550 DEVNAME(sc), sge->sg_hdr, 1551 sge->sg_addr); 1552 1553 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) { 1554 nce = &nsge[sc->sc_chain_len - 1]; 1555 addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4; 1556 addr = addr << 16 | 1557 sizeof(struct mpi_sge) * sc->sc_chain_len; 1558 } else { 1559 nce = NULL; 1560 addr = sizeof(struct mpi_sge) * 1561 (dmap->dm_nsegs - i); 1562 } 1563 1564 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN | 1565 MPI_SGE_FL_SIZE_64 | addr); 1566 1567 ce_dva = ccb->ccb_cmd_dva + 1568 ((u_int8_t *)nsge - (u_int8_t *)mcb); 1569 1570 ce->sg_addr = htole64(ce_dva); 1571 1572 DNPRINTF(MPI_D_DMA, "%s: ce: 0x%08x 0x%016llx\n", 1573 DEVNAME(sc), ce->sg_hdr, ce->sg_addr); 1574 1575 ce = nce; 1576 } 1577 1578 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc), 1579 i, dmap->dm_segs[i].ds_len, 1580 (u_int64_t)dmap->dm_segs[i].ds_addr); 1581 1582 sge = nsge; 1583 1584 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len); 1585 sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr); 1586 1587 DNPRINTF(MPI_D_DMA, "%s: %d: 0x%08x 0x%016llx\n", 1588 DEVNAME(sc), i, sge->sg_hdr, sge->sg_addr); 1589 1590 nsge = sge + 1; 1591 } 1592 1593 /* terminate list */ 1594 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 1595 MPI_SGE_FL_EOL); 1596 1597 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1598 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_PREREAD : 1599 BUS_DMASYNC_PREWRITE); 1600 1601 return (0); 1602 } 1603 1604 void 1605 mpi_minphys(struct buf *bp, struct scsi_link *sl) 1606 { 1607 /* XXX */ 1608 if (bp->b_bcount > MAXPHYS) 1609 bp->b_bcount = MAXPHYS; 1610 minphys(bp); 1611 } 1612 1613 int 1614 mpi_scsi_probe_virtual(struct scsi_link *link) 1615 { 1616 struct mpi_softc *sc = link->adapter_softc; 1617 struct mpi_cfg_hdr hdr; 1618 struct mpi_cfg_raid_vol_pg0 *rp0; 1619 int len; 1620 int rv; 1621 1622 if (!ISSET(sc->sc_flags, MPI_F_RAID)) 1623 return (0); 1624 1625 if (link->lun > 0) 1626 return (0); 1627 1628 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 1629 0, link->target, MPI_PG_POLL, &hdr); 1630 if (rv != 0) 1631 return (0); 1632 1633 len = hdr.page_length * 4; 1634 rp0 = malloc(len, M_TEMP, M_NOWAIT); 1635 if (rp0 == NULL) 1636 return (ENOMEM); 1637 1638 rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len); 1639 if (rv == 0) 1640 SET(link->flags, SDEV_VIRTUAL); 1641 1642 free(rp0, M_TEMP); 1643 return (0); 1644 } 1645 1646 int 1647 mpi_scsi_probe(struct scsi_link *link) 1648 { 1649 struct mpi_softc *sc = link->adapter_softc; 1650 struct mpi_ecfg_hdr ehdr; 1651 struct mpi_cfg_sas_dev_pg0 pg0; 1652 u_int32_t address; 1653 int rv; 1654 1655 rv = mpi_scsi_probe_virtual(link); 1656 if (rv != 0) 1657 return (rv); 1658 1659 if (ISSET(link->flags, SDEV_VIRTUAL)) 1660 return (0); 1661 1662 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS) 1663 return (0); 1664 1665 address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target; 1666 1667 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0, 1668 address, &ehdr) != 0) 1669 return (EIO); 1670 1671 if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0) 1672 return (0); 1673 1674 DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n", 1675 DEVNAME(sc), link->target); 1676 DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n", 1677 DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle)); 1678 DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc), 1679 letoh64(pg0.sas_addr)); 1680 DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x " 1681 "access_status: 0x%02x\n", DEVNAME(sc), 1682 letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status); 1683 DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x " 1684 "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc), 1685 letoh16(pg0.dev_handle), pg0.bus, pg0.target); 1686 DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc), 1687 letoh32(pg0.device_info)); 1688 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n", 1689 DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port); 1690 1691 if (ISSET(letoh32(pg0.device_info), 1692 MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) { 1693 DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n", 1694 DEVNAME(sc), link->target); 1695 link->flags |= SDEV_ATAPI; 1696 link->quirks |= SDEV_ONLYBIG; 1697 } 1698 1699 return (0); 1700 } 1701 1702 u_int32_t 1703 mpi_read(struct mpi_softc *sc, bus_size_t r) 1704 { 1705 u_int32_t rv; 1706 1707 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1708 BUS_SPACE_BARRIER_READ); 1709 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 1710 1711 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv); 1712 1713 return (rv); 1714 } 1715 1716 void 1717 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v) 1718 { 1719 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v); 1720 1721 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 1722 
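    /*
     * The write barrier below keeps this register write ordered
     * ahead of any later writes to the chip.
     */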
bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1723 BUS_SPACE_BARRIER_WRITE); 1724 } 1725 1726 int 1727 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1728 u_int32_t target) 1729 { 1730 int i; 1731 1732 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r, 1733 mask, target); 1734 1735 for (i = 0; i < 10000; i++) { 1736 if ((mpi_read(sc, r) & mask) == target) 1737 return (0); 1738 delay(1000); 1739 } 1740 1741 return (1); 1742 } 1743 1744 int 1745 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1746 u_int32_t target) 1747 { 1748 int i; 1749 1750 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r, 1751 mask, target); 1752 1753 for (i = 0; i < 10000; i++) { 1754 if ((mpi_read(sc, r) & mask) != target) 1755 return (0); 1756 delay(1000); 1757 } 1758 1759 return (1); 1760 } 1761 1762 int 1763 mpi_init(struct mpi_softc *sc) 1764 { 1765 u_int32_t db; 1766 int i; 1767 1768 /* spin until the IOC leaves the RESET state */ 1769 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1770 MPI_DOORBELL_STATE_RESET) != 0) { 1771 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave " 1772 "reset state\n", DEVNAME(sc)); 1773 return (1); 1774 } 1775 1776 /* check current ownership */ 1777 db = mpi_read_db(sc); 1778 if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) { 1779 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n", 1780 DEVNAME(sc)); 1781 return (0); 1782 } 1783 1784 for (i = 0; i < 5; i++) { 1785 switch (db & MPI_DOORBELL_STATE) { 1786 case MPI_DOORBELL_STATE_READY: 1787 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n", 1788 DEVNAME(sc)); 1789 return (0); 1790 1791 case MPI_DOORBELL_STATE_OPER: 1792 case MPI_DOORBELL_STATE_FAULT: 1793 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being " 1794 "reset\n" , DEVNAME(sc)); 1795 if (mpi_reset_soft(sc) != 0) 1796 mpi_reset_hard(sc); 1797 break; 1798 1799 case MPI_DOORBELL_STATE_RESET: 1800 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come " 1801 "out of reset\n", DEVNAME(sc)); 1802 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1803 MPI_DOORBELL_STATE_RESET) != 0) 1804 return (1); 1805 break; 1806 } 1807 db = mpi_read_db(sc); 1808 } 1809 1810 return (1); 1811 } 1812 1813 int 1814 mpi_reset_soft(struct mpi_softc *sc) 1815 { 1816 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc)); 1817 1818 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1819 return (1); 1820 1821 mpi_write_db(sc, 1822 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)); 1823 if (mpi_wait_eq(sc, MPI_INTR_STATUS, 1824 MPI_INTR_STATUS_IOCDOORBELL, 0) != 0) 1825 return (1); 1826 1827 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1828 MPI_DOORBELL_STATE_READY) != 0) 1829 return (1); 1830 1831 return (0); 1832 } 1833 1834 int 1835 mpi_reset_hard(struct mpi_softc *sc) 1836 { 1837 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc)); 1838 1839 /* enable diagnostic register */ 1840 mpi_write(sc, MPI_WRITESEQ, 0xff); 1841 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1); 1842 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2); 1843 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3); 1844 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4); 1845 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5); 1846 1847 /* reset ioc */ 1848 mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER); 1849 1850 delay(10000); 1851 1852 /* disable diagnostic register */ 1853 mpi_write(sc, MPI_WRITESEQ, 0xff); 1854 1855 /* restore pci bits? */ 1856 1857 /* firmware bits? 
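     * presumably re-downloading the image that mpi_fwupload() saves for
     * IOCs that set MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT; not done here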
*/ 1858 return (0); 1859 } 1860 1861 int 1862 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords) 1863 { 1864 u_int32_t *query = buf; 1865 int i; 1866 1867 /* make sure the doorbell is not in use. */ 1868 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1869 return (1); 1870 1871 /* clear pending doorbell interrupts */ 1872 if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL) 1873 mpi_write_intr(sc, 0); 1874 1875 /* 1876 * first write the doorbell with the handshake function and the 1877 * dword count. 1878 */ 1879 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) | 1880 MPI_DOORBELL_DWORDS(dwords)); 1881 1882 /* 1883 * the doorbell used bit will be set because a doorbell function has 1884 * started. Wait for the interrupt and then ack it. 1885 */ 1886 if (mpi_wait_db_int(sc) != 0) 1887 return (1); 1888 mpi_write_intr(sc, 0); 1889 1890 /* poll for the acknowledgement. */ 1891 if (mpi_wait_db_ack(sc) != 0) 1892 return (1); 1893 1894 /* write the query through the doorbell. */ 1895 for (i = 0; i < dwords; i++) { 1896 mpi_write_db(sc, htole32(query[i])); 1897 if (mpi_wait_db_ack(sc) != 0) 1898 return (1); 1899 } 1900 1901 return (0); 1902 } 1903 1904 int 1905 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword) 1906 { 1907 u_int16_t *words = (u_int16_t *)dword; 1908 int i; 1909 1910 for (i = 0; i < 2; i++) { 1911 if (mpi_wait_db_int(sc) != 0) 1912 return (1); 1913 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK); 1914 mpi_write_intr(sc, 0); 1915 } 1916 1917 return (0); 1918 } 1919 1920 int 1921 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords) 1922 { 1923 struct mpi_msg_reply *reply = buf; 1924 u_int32_t *dbuf = buf, dummy; 1925 int i; 1926 1927 /* get the first dword so we can read the length out of the header. */ 1928 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0) 1929 return (1); 1930 1931 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n", 1932 DEVNAME(sc), dwords, reply->msg_length); 1933 1934 /* 1935 * the total length, in dwords, is in the message length field of the 1936 * reply header. 
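     * Each dword comes back as two 16-bit doorbell reads (see
     * mpi_handshake_recv_dword()); anything the IOC returns beyond the
     * caller's buffer is drained below.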
1937 */ 1938 for (i = 1; i < MIN(dwords, reply->msg_length); i++) { 1939 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0) 1940 return (1); 1941 } 1942 1943 /* if there's extra stuff to come off the ioc, discard it */ 1944 while (i++ < reply->msg_length) { 1945 if (mpi_handshake_recv_dword(sc, &dummy) != 0) 1946 return (1); 1947 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: " 1948 "0x%08x\n", DEVNAME(sc), dummy); 1949 } 1950 1951 /* wait for the doorbell used bit to be reset and clear the intr */ 1952 if (mpi_wait_db_int(sc) != 0) 1953 return (1); 1954 mpi_write_intr(sc, 0); 1955 1956 return (0); 1957 } 1958 1959 void 1960 mpi_empty_done(struct mpi_ccb *ccb) 1961 { 1962 /* nothing to do */ 1963 } 1964 1965 int 1966 mpi_iocfacts(struct mpi_softc *sc) 1967 { 1968 struct mpi_msg_iocfacts_request ifq; 1969 struct mpi_msg_iocfacts_reply ifp; 1970 1971 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc)); 1972 1973 bzero(&ifq, sizeof(ifq)); 1974 bzero(&ifp, sizeof(ifp)); 1975 1976 ifq.function = MPI_FUNCTION_IOC_FACTS; 1977 ifq.chain_offset = 0; 1978 ifq.msg_flags = 0; 1979 ifq.msg_context = htole32(0xdeadbeef); 1980 1981 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 1982 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n", 1983 DEVNAME(sc)); 1984 return (1); 1985 } 1986 1987 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 1988 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n", 1989 DEVNAME(sc)); 1990 return (1); 1991 } 1992 1993 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n", 1994 DEVNAME(sc), ifp.function, ifp.msg_length, 1995 ifp.msg_version_maj, ifp.msg_version_min); 1996 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x " 1997 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags, 1998 ifp.ioc_number, ifp.header_version_maj, 1999 ifp.header_version_min); 2000 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc), 2001 letoh32(ifp.msg_context)); 2002 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n", 2003 DEVNAME(sc), letoh16(ifp.ioc_status), 2004 letoh16(ifp.ioc_exceptions)); 2005 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc), 2006 letoh32(ifp.ioc_loginfo)); 2007 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x " 2008 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags, 2009 ifp.block_size, ifp.whoinit, ifp.max_chain_depth); 2010 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n", 2011 DEVNAME(sc), letoh16(ifp.request_frame_size), 2012 letoh16(ifp.reply_queue_depth)); 2013 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc), 2014 letoh16(ifp.product_id)); 2015 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc), 2016 letoh32(ifp.current_host_mfa_hi_addr)); 2017 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d " 2018 "global_credits: %d\n", 2019 DEVNAME(sc), ifp.event_state, ifp.number_of_ports, 2020 letoh16(ifp.global_credits)); 2021 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc), 2022 letoh32(ifp.current_sense_buffer_hi_addr)); 2023 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n", 2024 DEVNAME(sc), ifp.max_buses, ifp.max_devices, 2025 letoh16(ifp.current_reply_frame_size)); 2026 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc), 2027 letoh32(ifp.fw_image_size)); 2028 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc), 2029 letoh32(ifp.ioc_capabilities)); 2030 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x " 2031 "fw_version_dev: 0x%02x\n", DEVNAME(sc), 
2032 ifp.fw_version_maj, ifp.fw_version_min, 2033 ifp.fw_version_unit, ifp.fw_version_dev); 2034 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n", 2035 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth)); 2036 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x " 2037 "addr 0x%016llx\n", DEVNAME(sc), 2038 letoh32(ifp.host_page_buffer_sge.sg_hdr), 2039 letoh64(ifp.host_page_buffer_sge.sg_addr)); 2040 sc->sc_maxcmds = letoh16(ifp.global_credits); 2041 sc->sc_maxchdepth = ifp.max_chain_depth; 2042 sc->sc_ioc_number = ifp.ioc_number; 2043 if (sc->sc_flags & MPI_F_SPI) 2044 sc->sc_buswidth = 16; 2045 else 2046 sc->sc_buswidth = 2047 (ifp.max_devices == 0) ? 256 : ifp.max_devices; 2048 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 2049 sc->sc_fw_len = letoh32(ifp.fw_image_size); 2050 2051 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, letoh16(ifp.reply_queue_depth)); 2052 2053 /* 2054 * you can fit sg elements on the end of the io cmd if they fit in the 2055 * request frame size. 2056 */ 2057 sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) - 2058 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge); 2059 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc), 2060 sc->sc_first_sgl_len); 2061 2062 sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) / 2063 sizeof(struct mpi_sge); 2064 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc), 2065 sc->sc_chain_len); 2066 2067 /* the sgl tailing the io cmd loses an entry to the chain element. */ 2068 sc->sc_max_sgl_len = MPI_MAX_SGL - 1; 2069 /* the sgl chains lose an entry for each chain element */ 2070 sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) / 2071 sc->sc_chain_len; 2072 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc), 2073 sc->sc_max_sgl_len); 2074 2075 /* XXX we're ignoring the max chain depth */ 2076 2077 return (0); 2078 } 2079 2080 int 2081 mpi_iocinit(struct mpi_softc *sc) 2082 { 2083 struct mpi_msg_iocinit_request iiq; 2084 struct mpi_msg_iocinit_reply iip; 2085 u_int32_t hi_addr; 2086 2087 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc)); 2088 2089 bzero(&iiq, sizeof(iiq)); 2090 bzero(&iip, sizeof(iip)); 2091 2092 iiq.function = MPI_FUNCTION_IOC_INIT; 2093 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER; 2094 2095 iiq.max_devices = (sc->sc_buswidth == 256) ? 
0 : sc->sc_buswidth; 2096 iiq.max_buses = 1; 2097 2098 iiq.msg_context = htole32(0xd00fd00f); 2099 2100 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE); 2101 2102 hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32); 2103 iiq.host_mfa_hi_addr = htole32(hi_addr); 2104 iiq.sense_buffer_hi_addr = htole32(hi_addr); 2105 2106 iiq.msg_version_maj = 0x01; 2107 iiq.msg_version_min = 0x02; 2108 2109 iiq.hdr_version_unit = 0x0d; 2110 iiq.hdr_version_dev = 0x00; 2111 2112 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 2113 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n", 2114 DEVNAME(sc)); 2115 return (1); 2116 } 2117 2118 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 2119 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n", 2120 DEVNAME(sc)); 2121 return (1); 2122 } 2123 2124 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d " 2125 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 2126 iip.msg_length, iip.whoinit); 2127 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d " 2128 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags, 2129 iip.max_buses, iip.max_devices, iip.flags); 2130 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2131 letoh32(iip.msg_context)); 2132 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2133 letoh16(iip.ioc_status)); 2134 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2135 letoh32(iip.ioc_loginfo)); 2136 2137 return (0); 2138 } 2139 2140 int 2141 mpi_portfacts(struct mpi_softc *sc) 2142 { 2143 struct mpi_ccb *ccb; 2144 struct mpi_msg_portfacts_request *pfq; 2145 volatile struct mpi_msg_portfacts_reply *pfp; 2146 int rv = 1; 2147 2148 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc)); 2149 2150 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2151 if (ccb == NULL) { 2152 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n", 2153 DEVNAME(sc)); 2154 return (rv); 2155 } 2156 2157 ccb->ccb_done = mpi_empty_done; 2158 pfq = ccb->ccb_cmd; 2159 2160 pfq->function = MPI_FUNCTION_PORT_FACTS; 2161 pfq->chain_offset = 0; 2162 pfq->msg_flags = 0; 2163 pfq->port_number = 0; 2164 pfq->msg_context = htole32(ccb->ccb_id); 2165 2166 if (mpi_poll(sc, ccb, 50000) != 0) { 2167 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc)); 2168 goto err; 2169 } 2170 2171 if (ccb->ccb_rcb == NULL) { 2172 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n", 2173 DEVNAME(sc)); 2174 goto err; 2175 } 2176 pfp = ccb->ccb_rcb->rcb_reply; 2177 2178 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n", 2179 DEVNAME(sc), pfp->function, pfp->msg_length); 2180 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n", 2181 DEVNAME(sc), pfp->msg_flags, pfp->port_number); 2182 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2183 letoh32(pfp->msg_context)); 2184 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2185 letoh16(pfp->ioc_status)); 2186 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2187 letoh32(pfp->ioc_loginfo)); 2188 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n", 2189 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type); 2190 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n", 2191 DEVNAME(sc), letoh16(pfp->protocol_flags), 2192 letoh16(pfp->port_scsi_id)); 2193 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d " 2194 "max_posted_cmd_buffers: %d\n", DEVNAME(sc), 2195 letoh16(pfp->max_persistent_ids), 2196 letoh16(pfp->max_posted_cmd_buffers)); 2197 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", 
DEVNAME(sc), 2198 letoh16(pfp->max_lan_buckets)); 2199 2200 sc->sc_porttype = pfp->port_type; 2201 if (sc->sc_target == -1) 2202 sc->sc_target = letoh16(pfp->port_scsi_id); 2203 2204 mpi_push_reply(sc, ccb->ccb_rcb); 2205 rv = 0; 2206 err: 2207 scsi_io_put(&sc->sc_iopool, ccb); 2208 2209 return (rv); 2210 } 2211 2212 int 2213 mpi_cfg_coalescing(struct mpi_softc *sc) 2214 { 2215 struct mpi_cfg_hdr hdr; 2216 struct mpi_cfg_ioc_pg1 pg; 2217 u_int32_t flags; 2218 2219 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) { 2220 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n", 2221 DEVNAME(sc)); 2222 return (1); 2223 } 2224 2225 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) { 2226 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n", 2227 DEVNAME(sc)); 2228 return (1); 2229 } 2230 2231 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc)); 2232 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc), 2233 letoh32(pg.flags)); 2234 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc), 2235 letoh32(pg.coalescing_timeout)); 2236 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n", 2237 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num); 2238 2239 flags = letoh32(pg.flags); 2240 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)) 2241 return (0); 2242 2243 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING)); 2244 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) { 2245 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n", 2246 DEVNAME(sc)); 2247 return (1); 2248 } 2249 2250 return (0); 2251 } 2252 2253 int 2254 mpi_eventnotify(struct mpi_softc *sc) 2255 { 2256 struct mpi_ccb *ccb; 2257 struct mpi_msg_event_request *enq; 2258 2259 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2260 if (ccb == NULL) { 2261 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n", 2262 DEVNAME(sc)); 2263 return (1); 2264 } 2265 2266 sc->sc_evt_ccb = ccb; 2267 SIMPLEQ_INIT(&sc->sc_evt_ack_queue); 2268 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO); 2269 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool, 2270 mpi_eventack, sc); 2271 2272 ccb->ccb_done = mpi_eventnotify_done; 2273 enq = ccb->ccb_cmd; 2274 2275 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION; 2276 enq->chain_offset = 0; 2277 enq->event_switch = MPI_EVENT_SWITCH_ON; 2278 enq->msg_context = htole32(ccb->ccb_id); 2279 2280 mpi_start(sc, ccb); 2281 return (0); 2282 } 2283 2284 void 2285 mpi_eventnotify_done(struct mpi_ccb *ccb) 2286 { 2287 struct mpi_softc *sc = ccb->ccb_sc; 2288 struct mpi_rcb *rcb = ccb->ccb_rcb; 2289 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2290 2291 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc)); 2292 2293 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d " 2294 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length, 2295 letoh16(enp->data_length)); 2296 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n", 2297 DEVNAME(sc), enp->ack_required, enp->msg_flags); 2298 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2299 letoh32(enp->msg_context)); 2300 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2301 letoh16(enp->ioc_status)); 2302 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2303 letoh32(enp->ioc_loginfo)); 2304 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc), 2305 letoh32(enp->event)); 2306 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc), 2307 letoh32(enp->event_context)); 2308 2309 switch (letoh32(enp->event)) { 2310 /* ignore these */ 2311 case 
MPI_EVENT_EVENT_CHANGE: 2312 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2313 break; 2314 2315 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2316 if (sc->sc_scsibus == NULL) 2317 break; 2318 2319 if (mpi_evt_sas(sc, rcb) != 0) { 2320 /* reply is freed later on */ 2321 return; 2322 } 2323 break; 2324 2325 case MPI_EVENT_RESCAN: 2326 if (sc->sc_scsibus != NULL && 2327 sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC) 2328 mpi_evt_fc_rescan(sc); 2329 break; 2330 2331 default: 2332 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n", 2333 DEVNAME(sc), letoh32(enp->event)); 2334 break; 2335 } 2336 2337 mpi_eventnotify_free(sc, rcb); 2338 } 2339 2340 void 2341 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb) 2342 { 2343 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2344 2345 if (enp->ack_required) { 2346 mtx_enter(&sc->sc_evt_ack_mtx); 2347 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link); 2348 mtx_leave(&sc->sc_evt_ack_mtx); 2349 scsi_ioh_add(&sc->sc_evt_ack_handler); 2350 } else 2351 mpi_push_reply(sc, rcb); 2352 } 2353 2354 int 2355 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb) 2356 { 2357 struct mpi_evt_sas_change *ch; 2358 u_int8_t *data; 2359 2360 data = rcb->rcb_reply; 2361 data += sizeof(struct mpi_msg_event_reply); 2362 ch = (struct mpi_evt_sas_change *)data; 2363 2364 if (ch->bus != 0) 2365 return (0); 2366 2367 switch (ch->reason) { 2368 case MPI_EVT_SASCH_REASON_ADDED: 2369 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED: 2370 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) { 2371 printf("%s: unable to request attach of %d\n", 2372 DEVNAME(sc), ch->target); 2373 } 2374 break; 2375 2376 case MPI_EVT_SASCH_REASON_NOT_RESPONDING: 2377 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE); 2378 2379 mtx_enter(&sc->sc_evt_scan_mtx); 2380 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link); 2381 mtx_leave(&sc->sc_evt_scan_mtx); 2382 scsi_ioh_add(&sc->sc_evt_scan_handler); 2383 2384 /* we'll handle event ack later on */ 2385 return (1); 2386 2387 case MPI_EVT_SASCH_REASON_SMART_DATA: 2388 case MPI_EVT_SASCH_REASON_UNSUPPORTED: 2389 case MPI_EVT_SASCH_REASON_INTERNAL_RESET: 2390 break; 2391 default: 2392 printf("%s: unknown reason for SAS device status change: " 2393 "0x%02x\n", DEVNAME(sc), ch->reason); 2394 break; 2395 } 2396 2397 return (0); 2398 } 2399 2400 void 2401 mpi_evt_sas_detach(void *cookie, void *io) 2402 { 2403 struct mpi_softc *sc = cookie; 2404 struct mpi_ccb *ccb = io; 2405 struct mpi_rcb *rcb, *next; 2406 struct mpi_msg_event_reply *enp; 2407 struct mpi_evt_sas_change *ch; 2408 struct mpi_msg_scsi_task_request *str; 2409 2410 DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc)); 2411 2412 mtx_enter(&sc->sc_evt_scan_mtx); 2413 rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue); 2414 if (rcb != NULL) { 2415 next = SIMPLEQ_NEXT(rcb, rcb_link); 2416 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link); 2417 } 2418 mtx_leave(&sc->sc_evt_scan_mtx); 2419 2420 if (rcb == NULL) { 2421 scsi_io_put(&sc->sc_iopool, ccb); 2422 return; 2423 } 2424 2425 enp = rcb->rcb_reply; 2426 ch = (struct mpi_evt_sas_change *)(enp + 1); 2427 2428 ccb->ccb_done = mpi_evt_sas_detach_done; 2429 str = ccb->ccb_cmd; 2430 2431 str->target_id = ch->target; 2432 str->bus = 0; 2433 str->function = MPI_FUNCTION_SCSI_TASK_MGMT; 2434 2435 str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET; 2436 2437 str->msg_context = htole32(ccb->ccb_id); 2438 2439 mpi_eventnotify_free(sc, rcb); 2440 2441 mpi_start(sc, ccb); 2442 2443 if (next != NULL) 2444 
scsi_ioh_add(&sc->sc_evt_scan_handler); 2445 } 2446 2447 void 2448 mpi_evt_sas_detach_done(struct mpi_ccb *ccb) 2449 { 2450 struct mpi_softc *sc = ccb->ccb_sc; 2451 struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply; 2452 2453 if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1, 2454 DETACH_FORCE) != 0) { 2455 printf("%s: unable to request detach of %d\n", 2456 DEVNAME(sc), r->target_id); 2457 } 2458 2459 mpi_push_reply(sc, ccb->ccb_rcb); 2460 scsi_io_put(&sc->sc_iopool, ccb); 2461 } 2462 2463 void 2464 mpi_evt_fc_rescan(struct mpi_softc *sc) 2465 { 2466 int queue = 1; 2467 2468 mtx_enter(&sc->sc_evt_rescan_mtx); 2469 if (sc->sc_evt_rescan_sem) 2470 queue = 0; 2471 else 2472 sc->sc_evt_rescan_sem = 1; 2473 mtx_leave(&sc->sc_evt_rescan_mtx); 2474 2475 if (queue) { 2476 workq_queue_task(NULL, &sc->sc_evt_rescan, 0, 2477 mpi_fc_rescan, sc, NULL); 2478 } 2479 } 2480 2481 void 2482 mpi_fc_rescan(void *xsc, void *xarg) 2483 { 2484 struct mpi_softc *sc = xsc; 2485 struct mpi_cfg_hdr hdr; 2486 struct mpi_cfg_fc_device_pg0 pg; 2487 struct scsi_link *link; 2488 u_int8_t devmap[256 / NBBY]; 2489 u_int32_t id = 0xffffff; 2490 int i; 2491 2492 mtx_enter(&sc->sc_evt_rescan_mtx); 2493 sc->sc_evt_rescan_sem = 0; 2494 mtx_leave(&sc->sc_evt_rescan_mtx); 2495 2496 bzero(devmap, sizeof(devmap)); 2497 2498 do { 2499 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0, 2500 id, 0, &hdr) != 0) { 2501 printf("%s: header get for rescan of 0x%08x failed\n", 2502 DEVNAME(sc), id); 2503 return; 2504 } 2505 2506 bzero(&pg, sizeof(pg)); 2507 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0) 2508 break; 2509 2510 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) && 2511 pg.current_bus == 0) 2512 setbit(devmap, pg.current_target_id); 2513 2514 id = htole32(pg.port_id); 2515 } while (id <= 0xff0000); 2516 2517 for (i = 0; i < sc->sc_buswidth; i++) { 2518 link = scsi_get_link(sc->sc_scsibus, i, 0); 2519 2520 if (isset(devmap, i)) { 2521 if (link == NULL) 2522 scsi_probe_target(sc->sc_scsibus, i); 2523 } else { 2524 if (link != NULL) { 2525 scsi_activate(sc->sc_scsibus, i, -1, 2526 DVACT_DEACTIVATE); 2527 scsi_detach_target(sc->sc_scsibus, i, 2528 DETACH_FORCE); 2529 } 2530 } 2531 } 2532 } 2533 2534 void 2535 mpi_eventack(void *cookie, void *io) 2536 { 2537 struct mpi_softc *sc = cookie; 2538 struct mpi_ccb *ccb = io; 2539 struct mpi_rcb *rcb, *next; 2540 struct mpi_msg_event_reply *enp; 2541 struct mpi_msg_eventack_request *eaq; 2542 2543 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc)); 2544 2545 mtx_enter(&sc->sc_evt_ack_mtx); 2546 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 2547 if (rcb != NULL) { 2548 next = SIMPLEQ_NEXT(rcb, rcb_link); 2549 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 2550 } 2551 mtx_leave(&sc->sc_evt_ack_mtx); 2552 2553 if (rcb == NULL) { 2554 scsi_io_put(&sc->sc_iopool, ccb); 2555 return; 2556 } 2557 2558 enp = rcb->rcb_reply; 2559 2560 ccb->ccb_done = mpi_eventack_done; 2561 eaq = ccb->ccb_cmd; 2562 2563 eaq->function = MPI_FUNCTION_EVENT_ACK; 2564 eaq->msg_context = htole32(ccb->ccb_id); 2565 2566 eaq->event = enp->event; 2567 eaq->event_context = enp->event_context; 2568 2569 mpi_push_reply(sc, rcb); 2570 mpi_start(sc, ccb); 2571 2572 if (next != NULL) 2573 scsi_ioh_add(&sc->sc_evt_ack_handler); 2574 } 2575 2576 void 2577 mpi_eventack_done(struct mpi_ccb *ccb) 2578 { 2579 struct mpi_softc *sc = ccb->ccb_sc; 2580 2581 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 2582 2583 mpi_push_reply(sc, ccb->ccb_rcb); 2584 
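/* the event ack reply carries no payload the driver needs; its buffer has already been posted back to the ioc above, so all that is left is to recycle the ccb */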
scsi_io_put(&sc->sc_iopool, ccb); 2585 } 2586 2587 int 2588 mpi_portenable(struct mpi_softc *sc) 2589 { 2590 struct mpi_ccb *ccb; 2591 struct mpi_msg_portenable_request *peq; 2592 int rv = 0; 2593 2594 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2595 2596 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2597 if (ccb == NULL) { 2598 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2599 DEVNAME(sc)); 2600 return (1); 2601 } 2602 2603 ccb->ccb_done = mpi_empty_done; 2604 peq = ccb->ccb_cmd; 2605 2606 peq->function = MPI_FUNCTION_PORT_ENABLE; 2607 peq->port_number = 0; 2608 peq->msg_context = htole32(ccb->ccb_id); 2609 2610 if (mpi_poll(sc, ccb, 50000) != 0) { 2611 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2612 return (1); 2613 } 2614 2615 if (ccb->ccb_rcb == NULL) { 2616 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2617 DEVNAME(sc)); 2618 rv = 1; 2619 } else 2620 mpi_push_reply(sc, ccb->ccb_rcb); 2621 2622 scsi_io_put(&sc->sc_iopool, ccb); 2623 2624 return (rv); 2625 } 2626 2627 int 2628 mpi_fwupload(struct mpi_softc *sc) 2629 { 2630 struct mpi_ccb *ccb; 2631 struct { 2632 struct mpi_msg_fwupload_request req; 2633 struct mpi_sge sge; 2634 } __packed *bundle; 2635 struct mpi_msg_fwupload_reply *upp; 2636 int rv = 0; 2637 2638 if (sc->sc_fw_len == 0) 2639 return (0); 2640 2641 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2642 2643 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2644 if (sc->sc_fw == NULL) { 2645 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n", 2646 DEVNAME(sc), sc->sc_fw_len); 2647 return (1); 2648 } 2649 2650 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2651 if (ccb == NULL) { 2652 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n", 2653 DEVNAME(sc)); 2654 goto err; 2655 } 2656 2657 ccb->ccb_done = mpi_empty_done; 2658 bundle = ccb->ccb_cmd; 2659 2660 bundle->req.function = MPI_FUNCTION_FW_UPLOAD; 2661 bundle->req.msg_context = htole32(ccb->ccb_id); 2662 2663 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW; 2664 2665 bundle->req.tce.details_length = 12; 2666 bundle->req.tce.image_size = htole32(sc->sc_fw_len); 2667 2668 bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2669 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 2670 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len); 2671 bundle->sge.sg_addr = htole64(MPI_DMA_DVA(sc->sc_fw)); 2672 2673 if (mpi_poll(sc, ccb, 50000) != 0) { 2674 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc)); 2675 goto err; 2676 } 2677 2678 if (ccb->ccb_rcb == NULL) 2679 panic("%s: unable to do fw upload", DEVNAME(sc)); 2680 upp = ccb->ccb_rcb->rcb_reply; 2681 2682 if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2683 rv = 1; 2684 2685 mpi_push_reply(sc, ccb->ccb_rcb); 2686 scsi_io_put(&sc->sc_iopool, ccb); 2687 2688 return (rv); 2689 2690 err: 2691 mpi_dmamem_free(sc, sc->sc_fw); 2692 return (1); 2693 } 2694 2695 void 2696 mpi_get_raid(struct mpi_softc *sc) 2697 { 2698 struct mpi_cfg_hdr hdr; 2699 struct mpi_cfg_ioc_pg2 *vol_page; 2700 size_t pagelen; 2701 u_int32_t capabilities; 2702 2703 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc)); 2704 2705 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) { 2706 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header " 2707 "for IOC page 2\n", DEVNAME(sc)); 2708 return; 2709 } 2710 2711 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2712 vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2713 if (vol_page == NULL) { 2714 DNPRINTF(MPI_D_RAID, "%s:
mpi_get_raid unable to allocate " 2715 "space for ioc config page 2\n", DEVNAME(sc)); 2716 return; 2717 } 2718 2719 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) { 2720 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC " 2721 "page 2\n", DEVNAME(sc)); 2722 goto out; 2723 } 2724 2725 capabilities = letoh32(vol_page->capabilities); 2726 2727 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x%08x\n", DEVNAME(sc), 2728 letoh32(vol_page->capabilities)); 2729 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d " 2730 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2731 vol_page->active_vols, vol_page->max_vols, 2732 vol_page->active_physdisks, vol_page->max_physdisks); 2733 2734 /* don't walk the list if there is no RAID capability */ 2735 if (capabilities == 0xdeadbeef) { 2736 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)); 2737 goto out; 2738 } 2739 2740 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)) 2741 sc->sc_flags |= MPI_F_RAID; 2742 2743 out: 2744 free(vol_page, M_TEMP); 2745 } 2746 2747 int 2748 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number, 2749 u_int32_t address, int flags, void *p) 2750 { 2751 struct mpi_ccb *ccb; 2752 struct mpi_msg_config_request *cq; 2753 struct mpi_msg_config_reply *cp; 2754 struct mpi_cfg_hdr *hdr = p; 2755 struct mpi_ecfg_hdr *ehdr = p; 2756 int etype = 0; 2757 int rv = 0; 2758 2759 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x " 2760 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2761 address, flags, MPI_PG_FMT); 2762 2763 ccb = scsi_io_get(&sc->sc_iopool, 2764 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0); 2765 if (ccb == NULL) { 2766 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n", 2767 DEVNAME(sc)); 2768 return (1); 2769 } 2770 2771 if (ISSET(flags, MPI_PG_EXTENDED)) { 2772 etype = type; 2773 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2774 } 2775 2776 cq = ccb->ccb_cmd; 2777 2778 cq->function = MPI_FUNCTION_CONFIG; 2779 cq->msg_context = htole32(ccb->ccb_id); 2780 2781 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER; 2782 2783 cq->config_header.page_number = number; 2784 cq->config_header.page_type = type; 2785 cq->ext_page_type = etype; 2786 cq->page_address = htole32(address); 2787 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2788 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 2789 2790 ccb->ccb_done = mpi_empty_done; 2791 if (ISSET(flags, MPI_PG_POLL)) { 2792 if (mpi_poll(sc, ccb, 50000) != 0) { 2793 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2794 DEVNAME(sc)); 2795 return (1); 2796 } 2797 } else 2798 mpi_wait(sc, ccb); 2799 2800 if (ccb->ccb_rcb == NULL) 2801 panic("%s: unable to fetch config header", DEVNAME(sc)); 2802 cp = ccb->ccb_rcb->rcb_reply; 2803 2804 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2805 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2806 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2807 "msg_flags: 0x%02x\n", DEVNAME(sc), 2808 letoh16(cp->ext_page_length), cp->ext_page_type, 2809 cp->msg_flags); 2810 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2811 letoh32(cp->msg_context)); 2812 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2813 letoh16(cp->ioc_status)); 2814 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2815 letoh32(cp->ioc_loginfo)); 2816 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2817 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2818
cp->config_header.page_version, 2819 cp->config_header.page_length, 2820 cp->config_header.page_number, 2821 cp->config_header.page_type); 2822 2823 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2824 rv = 1; 2825 else if (ISSET(flags, MPI_PG_EXTENDED)) { 2826 bzero(ehdr, sizeof(*ehdr)); 2827 ehdr->page_version = cp->config_header.page_version; 2828 ehdr->page_number = cp->config_header.page_number; 2829 ehdr->page_type = cp->config_header.page_type; 2830 ehdr->ext_page_length = cp->ext_page_length; 2831 ehdr->ext_page_type = cp->ext_page_type; 2832 } else 2833 *hdr = cp->config_header; 2834 2835 mpi_push_reply(sc, ccb->ccb_rcb); 2836 scsi_io_put(&sc->sc_iopool, ccb); 2837 2838 return (rv); 2839 } 2840 2841 int 2842 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags, 2843 void *p, int read, void *page, size_t len) 2844 { 2845 struct mpi_ccb *ccb; 2846 struct mpi_msg_config_request *cq; 2847 struct mpi_msg_config_reply *cp; 2848 struct mpi_cfg_hdr *hdr = p; 2849 struct mpi_ecfg_hdr *ehdr = p; 2850 char *kva; 2851 int page_length; 2852 int rv = 0; 2853 2854 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n", 2855 DEVNAME(sc), address, read, hdr->page_type); 2856 2857 page_length = ISSET(flags, MPI_PG_EXTENDED) ? 2858 letoh16(ehdr->ext_page_length) : hdr->page_length; 2859 2860 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) || 2861 len < page_length * 4) 2862 return (1); 2863 2864 ccb = scsi_io_get(&sc->sc_iopool, 2865 ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0); 2866 if (ccb == NULL) { 2867 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2868 return (1); 2869 } 2870 2871 cq = ccb->ccb_cmd; 2872 2873 cq->function = MPI_FUNCTION_CONFIG; 2874 cq->msg_context = htole32(ccb->ccb_id); 2875 2876 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2877 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2878 2879 if (ISSET(flags, MPI_PG_EXTENDED)) { 2880 cq->config_header.page_version = ehdr->page_version; 2881 cq->config_header.page_number = ehdr->page_number; 2882 cq->config_header.page_type = ehdr->page_type; 2883 cq->ext_page_len = ehdr->ext_page_length; 2884 cq->ext_page_type = ehdr->ext_page_type; 2885 } else 2886 cq->config_header = *hdr; 2887 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2888 cq->page_address = htole32(address); 2889 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2890 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2891 (page_length * 4) | 2892 (read ? 
MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2893 2894 /* bounce the page via the request space to avoid more bus_dma games */ 2895 cq->page_buffer.sg_addr = htole64(ccb->ccb_cmd_dva + 2896 sizeof(struct mpi_msg_config_request)); 2897 2898 kva = ccb->ccb_cmd; 2899 kva += sizeof(struct mpi_msg_config_request); 2900 if (!read) 2901 bcopy(page, kva, len); 2902 2903 ccb->ccb_done = mpi_empty_done; 2904 if (ISSET(flags, MPI_PG_POLL)) { 2905 if (mpi_poll(sc, ccb, 50000) != 0) { 2906 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2907 DEVNAME(sc)); 2908 return (1); 2909 } 2910 } else 2911 mpi_wait(sc, ccb); 2912 2913 if (ccb->ccb_rcb == NULL) { 2914 scsi_io_put(&sc->sc_iopool, ccb); 2915 return (1); 2916 } 2917 cp = ccb->ccb_rcb->rcb_reply; 2918 2919 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2920 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2921 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2922 "msg_flags: 0x%02x\n", DEVNAME(sc), 2923 letoh16(cp->ext_page_length), cp->ext_page_type, 2924 cp->msg_flags); 2925 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2926 letoh32(cp->msg_context)); 2927 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2928 letoh16(cp->ioc_status)); 2929 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2930 letoh32(cp->ioc_loginfo)); 2931 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2932 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2933 cp->config_header.page_version, 2934 cp->config_header.page_length, 2935 cp->config_header.page_number, 2936 cp->config_header.page_type); 2937 2938 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2939 rv = 1; 2940 else if (read) 2941 bcopy(kva, page, len); 2942 2943 mpi_push_reply(sc, ccb->ccb_rcb); 2944 scsi_io_put(&sc->sc_iopool, ccb); 2945 2946 return (rv); 2947 } 2948 2949 int 2950 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 2951 { 2952 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2953 2954 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc)); 2955 2956 switch (cmd) { 2957 case DIOCGCACHE: 2958 case DIOCSCACHE: 2959 if (ISSET(link->flags, SDEV_VIRTUAL)) { 2960 return (mpi_ioctl_cache(link, cmd, 2961 (struct dk_cache *)addr)); 2962 } 2963 break; 2964 2965 default: 2966 if (sc->sc_ioctl) 2967 return (sc->sc_ioctl(link->adapter_softc, cmd, addr)); 2968 2969 break; 2970 } 2971 2972 return (ENOTTY); 2973 } 2974 2975 int 2976 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 2977 { 2978 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2979 struct mpi_ccb *ccb; 2980 int len, rv; 2981 struct mpi_cfg_hdr hdr; 2982 struct mpi_cfg_raid_vol_pg0 *rpg0; 2983 int enabled; 2984 struct mpi_msg_raid_action_request *req; 2985 struct mpi_msg_raid_action_reply *rep; 2986 struct mpi_raid_settings settings; 2987 2988 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 2989 link->target, MPI_PG_POLL, &hdr); 2990 if (rv != 0) 2991 return (EIO); 2992 2993 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks * 2994 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 2995 rpg0 = malloc(len, M_TEMP, M_NOWAIT); 2996 if (rpg0 == NULL) 2997 return (ENOMEM); 2998 2999 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, 3000 rpg0, len) != 0) { 3001 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3002 DEVNAME(sc)); 3003 rv = EIO; 3004 goto done; 3005 } 3006 3007 enabled = 
ISSET(letoh16(rpg0->settings.volume_settings), 3008 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0; 3009 3010 if (cmd == DIOCGCACHE) { 3011 dc->wrcache = enabled; 3012 dc->rdcache = 0; 3013 goto done; 3014 } /* else DIOCSCACHE */ 3015 3016 if (dc->rdcache) { 3017 rv = EOPNOTSUPP; 3018 goto done; 3019 } 3020 3021 if (((dc->wrcache) ? 1 : 0) == enabled) 3022 goto done; 3023 3024 settings = rpg0->settings; 3025 if (dc->wrcache) { 3026 SET(settings.volume_settings, 3027 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3028 } else { 3029 CLR(settings.volume_settings, 3030 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3031 } 3032 3033 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 3034 if (ccb == NULL) { 3035 rv = ENOMEM; 3036 goto done; 3037 } 3038 3039 req = ccb->ccb_cmd; 3040 req->function = MPI_FUNCTION_RAID_ACTION; 3041 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS; 3042 req->vol_id = rpg0->volume_id; 3043 req->vol_bus = rpg0->volume_bus; 3044 req->msg_context = htole32(ccb->ccb_id); 3045 3046 memcpy(&req->data_word, &settings, sizeof(req->data_word)); 3047 ccb->ccb_done = mpi_empty_done; 3048 if (mpi_poll(sc, ccb, 50000) != 0) { 3049 rv = EIO; 3050 goto done; 3051 } 3052 3053 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb; 3054 if (rep == NULL) 3055 panic("%s: raid volume settings change failed", DEVNAME(sc)); 3056 3057 switch (letoh16(rep->action_status)) { 3058 case MPI_RAID_ACTION_STATUS_OK: 3059 rv = 0; 3060 break; 3061 default: 3062 rv = EIO; 3063 break; 3064 } 3065 3066 mpi_push_reply(sc, ccb->ccb_rcb); 3067 scsi_io_put(&sc->sc_iopool, ccb); 3068 3069 done: 3070 free(rpg0, M_TEMP); 3071 return (rv); 3072 } 3073 3074 #if NBIO > 0 3075 int 3076 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id) 3077 { 3078 int len, rv = EINVAL; 3079 u_int32_t address; 3080 struct mpi_cfg_hdr hdr; 3081 struct mpi_cfg_raid_vol_pg0 *rpg0; 3082 3083 /* get IOC page 2 */ 3084 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3085 sc->sc_cfg_hdr.page_length * 4) != 0) { 3086 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to " 3087 "fetch IOC page 2\n", DEVNAME(sc)); 3088 goto done; 3089 } 3090 3091 /* XXX return something else than EINVAL to indicate within hs range */ 3092 if (id > sc->sc_vol_page->active_vols) { 3093 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol " 3094 "id: %d\n", DEVNAME(sc), id); 3095 goto done; 3096 } 3097 3098 /* replace current buffer with new one */ 3099 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks * 3100 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3101 rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL); 3102 if (rpg0 == NULL) { 3103 printf("%s: can't get memory for RAID page 0, " 3104 "bio disabled\n", DEVNAME(sc)); 3105 goto done; 3106 } 3107 if (sc->sc_rpg0) 3108 free(sc->sc_rpg0, M_DEVBUF); 3109 sc->sc_rpg0 = rpg0; 3110 3111 /* get raid vol page 0 */ 3112 address = sc->sc_vol_list[id].vol_id | 3113 (sc->sc_vol_list[id].vol_bus << 8); 3114 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3115 address, 0, &hdr) != 0) 3116 goto done; 3117 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) { 3118 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3119 DEVNAME(sc)); 3120 goto done; 3121 } 3122 3123 rv = 0; 3124 done: 3125 return (rv); 3126 } 3127 3128 int 3129 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3130 { 3131 struct mpi_softc *sc = (struct mpi_softc *)dev; 3132 int error = 0; 3133 3134 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc)); 3135 3136 /* make 
sure we have bio enabled */ 3137 if (sc->sc_ioctl != mpi_ioctl) 3138 return (EINVAL); 3139 3140 rw_enter_write(&sc->sc_lock); 3141 3142 switch (cmd) { 3143 case BIOCINQ: 3144 DNPRINTF(MPI_D_IOCTL, "inq\n"); 3145 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr); 3146 break; 3147 3148 case BIOCVOL: 3149 DNPRINTF(MPI_D_IOCTL, "vol\n"); 3150 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr); 3151 break; 3152 3153 case BIOCDISK: 3154 DNPRINTF(MPI_D_IOCTL, "disk\n"); 3155 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr); 3156 break; 3157 3158 case BIOCALARM: 3159 DNPRINTF(MPI_D_IOCTL, "alarm\n"); 3160 break; 3161 3162 case BIOCBLINK: 3163 DNPRINTF(MPI_D_IOCTL, "blink\n"); 3164 break; 3165 3166 case BIOCSETSTATE: 3167 DNPRINTF(MPI_D_IOCTL, "setstate\n"); 3168 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr); 3169 break; 3170 3171 default: 3172 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n"); 3173 error = EINVAL; 3174 } 3175 3176 rw_exit_write(&sc->sc_lock); 3177 3178 return (error); 3179 } 3180 3181 int 3182 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi) 3183 { 3184 if (!(sc->sc_flags & MPI_F_RAID)) { 3185 bi->bi_novol = 0; 3186 bi->bi_nodisk = 0; 3187 } 3188 3189 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3190 sc->sc_cfg_hdr.page_length * 4) != 0) { 3191 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC " 3192 "page 2\n", DEVNAME(sc)); 3193 return (EINVAL); 3194 } 3195 3196 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d " 3197 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 3198 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols, 3199 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks); 3200 3201 bi->bi_novol = sc->sc_vol_page->active_vols; 3202 bi->bi_nodisk = sc->sc_vol_page->active_physdisks; 3203 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3204 3205 return (0); 3206 } 3207 3208 int 3209 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv) 3210 { 3211 int i, vol, id, rv = EINVAL; 3212 struct device *dev; 3213 struct scsi_link *link; 3214 struct mpi_cfg_raid_vol_pg0 *rpg0; 3215 char *vendp; 3216 3217 id = bv->bv_volid; 3218 if (mpi_bio_get_pg0_raid(sc, id)) 3219 goto done; 3220 3221 if (id > sc->sc_vol_page->active_vols) 3222 return (EINVAL); /* XXX deal with hot spares */ 3223 3224 rpg0 = sc->sc_rpg0; 3225 if (rpg0 == NULL) 3226 goto done; 3227 3228 /* determine status */ 3229 switch (rpg0->volume_state) { 3230 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3231 bv->bv_status = BIOC_SVONLINE; 3232 break; 3233 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3234 bv->bv_status = BIOC_SVDEGRADED; 3235 break; 3236 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3237 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3238 bv->bv_status = BIOC_SVOFFLINE; 3239 break; 3240 default: 3241 bv->bv_status = BIOC_SVINVALID; 3242 } 3243 3244 /* override status if scrubbing or something */ 3245 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) 3246 bv->bv_status = BIOC_SVREBUILD; 3247 3248 bv->bv_size = (u_quad_t)letoh32(rpg0->max_lba) * 512; 3249 3250 switch (sc->sc_vol_list[id].vol_type) { 3251 case MPI_CFG_RAID_TYPE_RAID_IS: 3252 bv->bv_level = 0; 3253 break; 3254 case MPI_CFG_RAID_TYPE_RAID_IME: 3255 case MPI_CFG_RAID_TYPE_RAID_IM: 3256 bv->bv_level = 1; 3257 break; 3258 case MPI_CFG_RAID_TYPE_RAID_5: 3259 bv->bv_level = 5; 3260 break; 3261 case MPI_CFG_RAID_TYPE_RAID_6: 3262 bv->bv_level = 6; 3263 break; 3264 case MPI_CFG_RAID_TYPE_RAID_10: 3265 bv->bv_level = 10; 3266 break; 3267 case MPI_CFG_RAID_TYPE_RAID_50: 3268 
bv->bv_level = 50; 3269 break; 3270 default: 3271 bv->bv_level = -1; 3272 } 3273 3274 bv->bv_nodisk = rpg0->num_phys_disks; 3275 3276 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) { 3277 link = scsi_get_link(sc->sc_scsibus, i, 0); 3278 if (link == NULL) 3279 continue; 3280 3281 /* skip if not a virtual disk */ 3282 if (!(link->flags & SDEV_VIRTUAL)) 3283 continue; 3284 3285 vol++; 3286 /* are we it? */ 3287 if (vol == bv->bv_volid) { 3288 dev = link->device_softc; 3289 vendp = link->inqdata.vendor; 3290 memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor); 3291 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0'; 3292 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev); 3293 break; 3294 } 3295 } 3296 rv = 0; 3297 done: 3298 return (rv); 3299 } 3300 3301 int 3302 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd) 3303 { 3304 int pdid, id, rv = EINVAL; 3305 u_int32_t address; 3306 struct mpi_cfg_hdr hdr; 3307 struct mpi_cfg_raid_vol_pg0 *rpg0; 3308 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk; 3309 struct mpi_cfg_raid_physdisk_pg0 pdpg0; 3310 3311 id = bd->bd_volid; 3312 if (mpi_bio_get_pg0_raid(sc, id)) 3313 goto done; 3314 3315 if (id > sc->sc_vol_page->active_vols) 3316 return (EINVAL); /* XXX deal with hot spares */ 3317 3318 rpg0 = sc->sc_rpg0; 3319 if (rpg0 == NULL) 3320 goto done; 3321 3322 pdid = bd->bd_diskid; 3323 if (pdid > rpg0->num_phys_disks) 3324 goto done; 3325 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1); 3326 physdisk += pdid; 3327 3328 /* get raid phys disk page 0 */ 3329 address = physdisk->phys_disk_num; 3330 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address, 3331 &hdr) != 0) 3332 goto done; 3333 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) { 3334 bd->bd_status = BIOC_SDFAILED; 3335 return (0); 3336 } 3337 bd->bd_channel = pdpg0.phys_disk_bus; 3338 bd->bd_target = pdpg0.phys_disk_id; 3339 bd->bd_lun = 0; 3340 bd->bd_size = (u_quad_t)letoh32(pdpg0.max_lba) * 512; 3341 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor)); 3342 3343 switch (pdpg0.phys_disk_state) { 3344 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE: 3345 bd->bd_status = BIOC_SDONLINE; 3346 break; 3347 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING: 3348 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED: 3349 bd->bd_status = BIOC_SDFAILED; 3350 break; 3351 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL: 3352 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER: 3353 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE: 3354 bd->bd_status = BIOC_SDOFFLINE; 3355 break; 3356 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT: 3357 bd->bd_status = BIOC_SDSCRUB; 3358 break; 3359 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT: 3360 default: 3361 bd->bd_status = BIOC_SDINVALID; 3362 break; 3363 } 3364 3365 /* XXX figure this out */ 3366 /* bd_serial[32]; */ 3367 /* bd_procdev[16]; */ 3368 3369 rv = 0; 3370 done: 3371 return (rv); 3372 } 3373 3374 int 3375 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs) 3376 { 3377 return (ENOTTY); 3378 } 3379 3380 #ifndef SMALL_KERNEL 3381 int 3382 mpi_create_sensors(struct mpi_softc *sc) 3383 { 3384 struct device *dev; 3385 struct scsi_link *link; 3386 int i, vol; 3387 3388 /* count volumes */ 3389 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3390 link = scsi_get_link(sc->sc_scsibus, i, 0); 3391 if (link == NULL) 3392 continue; 3393 /* skip if not a virtual disk */ 3394 if (!(link->flags & SDEV_VIRTUAL)) 3395 continue; 3396 3397 vol++; 3398 } 3399 if (vol == 0) 3400 return (0); 3401 3402 sc->sc_sensors = malloc(sizeof(struct ksensor) * vol, 
3403 M_DEVBUF, M_NOWAIT | M_ZERO); 3404 if (sc->sc_sensors == NULL) 3405 return (1); 3406 3407 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3408 sizeof(sc->sc_sensordev.xname)); 3409 3410 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) { 3411 link = scsi_get_link(sc->sc_scsibus, i, 0); 3412 if (link == NULL) 3413 continue; 3414 /* skip if not a virtual disk */ 3415 if (!(link->flags & SDEV_VIRTUAL)) 3416 continue; 3417 3418 dev = link->device_softc; 3419 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname, 3420 sizeof(sc->sc_sensors[vol].desc)); 3421 sc->sc_sensors[vol].type = SENSOR_DRIVE; 3422 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3423 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]); 3424 3425 vol++; 3426 } 3427 3428 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL) 3429 goto bad; 3430 3431 sensordev_install(&sc->sc_sensordev); 3432 3433 return (0); 3434 3435 bad: 3436 free(sc->sc_sensors, M_DEVBUF); 3437 return (1); 3438 } 3439 3440 void 3441 mpi_refresh_sensors(void *arg) 3442 { 3443 int i, vol; 3444 struct scsi_link *link; 3445 struct mpi_softc *sc = arg; 3446 struct mpi_cfg_raid_vol_pg0 *rpg0; 3447 3448 rw_enter_write(&sc->sc_lock); 3449 3450 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3451 link = scsi_get_link(sc->sc_scsibus, i, 0); 3452 if (link == NULL) 3453 continue; 3454 /* skip if not a virtual disk */ 3455 if (!(link->flags & SDEV_VIRTUAL)) 3456 continue; 3457 3458 if (mpi_bio_get_pg0_raid(sc, vol)) 3459 continue; 3460 3461 rpg0 = sc->sc_rpg0; 3462 if (rpg0 == NULL) 3463 goto done; 3464 3465 /* determine status */ 3466 switch (rpg0->volume_state) { 3467 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3468 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE; 3469 sc->sc_sensors[vol].status = SENSOR_S_OK; 3470 break; 3471 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3472 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL; 3473 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3474 break; 3475 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3476 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3477 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL; 3478 sc->sc_sensors[vol].status = SENSOR_S_CRIT; 3479 break; 3480 default: 3481 sc->sc_sensors[vol].value = 0; /* unknown */ 3482 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3483 } 3484 3485 /* override status if scrubbing or something */ 3486 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) { 3487 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD; 3488 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3489 } 3490 3491 vol++; 3492 } 3493 done: 3494 rw_exit_write(&sc->sc_lock); 3495 } 3496 #endif /* SMALL_KERNEL */ 3497 #endif /* NBIO > 0 */ 3498
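/*
 * Example (illustration only; it simply mirrors what mpi_cfg_coalescing()
 * above already does): config page accesses in this driver follow a two
 * step pattern. fetch the page header first, then hand that header back
 * to read or write the page itself:
 *
 *	struct mpi_cfg_hdr hdr;
 *	struct mpi_cfg_ioc_pg1 pg;
 *
 *	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0)
 *		return (1);
 *	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
 *		return (1);
 *
 * the header fetch fills in hdr.page_length (in dwords), which
 * mpi_req_cfg_page() uses to validate the caller's buffer and to size the
 * SGE for the transfer. passing 0 instead of 1 for the read argument
 * writes the (modified) page back, which is how mpi_cfg_coalescing()
 * turns off reply coalescing.
 */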