/*	$OpenBSD: mpi.c,v 1.213 2020/04/21 19:27:03 krw Exp $ */

/*
 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/dkio.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/biovar.h>
#include <dev/ic/mpireg.h>
#include <dev/ic/mpivar.h>

#ifdef MPI_DEBUG
uint32_t	mpi_debug = 0
/*		    | MPI_D_CMD */
/*		    | MPI_D_INTR */
/*		    | MPI_D_MISC */
/*		    | MPI_D_DMA */
/*		    | MPI_D_IOCTL */
/*		    | MPI_D_RW */
/*		    | MPI_D_MEM */
/*		    | MPI_D_CCB */
/*		    | MPI_D_PPR */
/*		    | MPI_D_RAID */
/*		    | MPI_D_EVT */
		;
#endif

struct cfdriver mpi_cd = {
	NULL,
	"mpi",
	DV_DULL
};

void		mpi_scsi_cmd(struct scsi_xfer *);
void		mpi_scsi_cmd_done(struct mpi_ccb *);
int		mpi_scsi_probe(struct scsi_link *);
int		mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
		    int);

struct scsi_adapter mpi_switch = {
	mpi_scsi_cmd, NULL, mpi_scsi_probe, NULL, mpi_scsi_ioctl
};

struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t);
void		mpi_dmamem_free(struct mpi_softc *,
		    struct mpi_dmamem *);
int		mpi_alloc_ccbs(struct mpi_softc *);
void		*mpi_get_ccb(void *);
void		mpi_put_ccb(void *, void *);
int		mpi_alloc_replies(struct mpi_softc *);
void		mpi_push_replies(struct mpi_softc *);
void		mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);

void		mpi_start(struct mpi_softc *, struct mpi_ccb *);
int		mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
void		mpi_poll_done(struct mpi_ccb *);
void		mpi_reply(struct mpi_softc *, u_int32_t);

void		mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
void		mpi_wait_done(struct mpi_ccb *);

int		mpi_cfg_spi_port(struct mpi_softc *);
void		mpi_squash_ppr(struct mpi_softc *);
void		mpi_run_ppr(struct mpi_softc *);
int		mpi_ppr(struct mpi_softc *, struct scsi_link *,
		    struct mpi_cfg_raid_physdisk *, int, int, int);
int		mpi_inq(struct mpi_softc *, u_int16_t, int);

int		mpi_cfg_sas(struct mpi_softc *);
int		mpi_cfg_fc(struct mpi_softc *);

void		mpi_timeout_xs(void *);
int		mpi_load_xs(struct mpi_ccb *);

u_int32_t	mpi_read(struct mpi_softc *, bus_size_t);
void		mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
int		mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
		    u_int32_t);
int		mpi_wait_ne(struct mpi_softc *,
bus_size_t, u_int32_t, 111 u_int32_t); 112 113 int mpi_init(struct mpi_softc *); 114 int mpi_reset_soft(struct mpi_softc *); 115 int mpi_reset_hard(struct mpi_softc *); 116 117 int mpi_handshake_send(struct mpi_softc *, void *, size_t); 118 int mpi_handshake_recv_dword(struct mpi_softc *, 119 u_int32_t *); 120 int mpi_handshake_recv(struct mpi_softc *, void *, size_t); 121 122 void mpi_empty_done(struct mpi_ccb *); 123 124 int mpi_iocinit(struct mpi_softc *); 125 int mpi_iocfacts(struct mpi_softc *); 126 int mpi_portfacts(struct mpi_softc *); 127 int mpi_portenable(struct mpi_softc *); 128 int mpi_cfg_coalescing(struct mpi_softc *); 129 void mpi_get_raid(struct mpi_softc *); 130 int mpi_fwupload(struct mpi_softc *); 131 int mpi_manufacturing(struct mpi_softc *); 132 int mpi_scsi_probe_virtual(struct scsi_link *); 133 134 int mpi_eventnotify(struct mpi_softc *); 135 void mpi_eventnotify_done(struct mpi_ccb *); 136 void mpi_eventnotify_free(struct mpi_softc *, 137 struct mpi_rcb *); 138 void mpi_eventack(void *, void *); 139 void mpi_eventack_done(struct mpi_ccb *); 140 int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *); 141 void mpi_evt_sas_detach(void *, void *); 142 void mpi_evt_sas_detach_done(struct mpi_ccb *); 143 void mpi_fc_rescan(void *); 144 145 int mpi_req_cfg_header(struct mpi_softc *, u_int8_t, 146 u_int8_t, u_int32_t, int, void *); 147 int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int, 148 void *, int, void *, size_t); 149 150 int mpi_ioctl_cache(struct scsi_link *, u_long, 151 struct dk_cache *); 152 153 #if NBIO > 0 154 int mpi_bio_get_pg0_raid(struct mpi_softc *, int); 155 int mpi_ioctl(struct device *, u_long, caddr_t); 156 int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *); 157 int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *); 158 int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *); 159 int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *); 160 #ifndef SMALL_KERNEL 161 int mpi_create_sensors(struct mpi_softc *); 162 void mpi_refresh_sensors(void *); 163 #endif /* SMALL_KERNEL */ 164 #endif /* NBIO > 0 */ 165 166 #define DEVNAME(s) ((s)->sc_dev.dv_xname) 167 168 #define dwordsof(s) (sizeof(s) / sizeof(u_int32_t)) 169 170 #define mpi_read_db(s) mpi_read((s), MPI_DOORBELL) 171 #define mpi_write_db(s, v) mpi_write((s), MPI_DOORBELL, (v)) 172 #define mpi_read_intr(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 173 MPI_INTR_STATUS) 174 #define mpi_write_intr(s, v) mpi_write((s), MPI_INTR_STATUS, (v)) 175 #define mpi_pop_reply(s) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \ 176 MPI_REPLY_QUEUE) 177 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \ 178 MPI_REPLY_QUEUE, (v)) 179 180 #define mpi_wait_db_int(s) mpi_wait_ne((s), MPI_INTR_STATUS, \ 181 MPI_INTR_STATUS_DOORBELL, 0) 182 #define mpi_wait_db_ack(s) mpi_wait_eq((s), MPI_INTR_STATUS, \ 183 MPI_INTR_STATUS_IOCDOORBELL, 0) 184 185 #define MPI_PG_EXTENDED (1<<0) 186 #define MPI_PG_POLL (1<<1) 187 #define MPI_PG_FMT "\020" "\002POLL" "\001EXTENDED" 188 189 #define mpi_cfg_header(_s, _t, _n, _a, _h) \ 190 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 191 MPI_PG_POLL, (_h)) 192 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \ 193 mpi_req_cfg_header((_s), (_t), (_n), (_a), \ 194 MPI_PG_POLL|MPI_PG_EXTENDED, (_h)) 195 196 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \ 197 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \ 198 (_h), (_r), (_p), (_l)) 199 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \ 200 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \ 201 (_h), 
(_r), (_p), (_l)) 202 203 static inline void 204 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva) 205 { 206 htolem32(&sge->sg_addr_lo, dva); 207 htolem32(&sge->sg_addr_hi, dva >> 32); 208 } 209 210 int 211 mpi_attach(struct mpi_softc *sc) 212 { 213 struct scsibus_attach_args saa; 214 struct mpi_ccb *ccb; 215 216 printf("\n"); 217 218 rw_init(&sc->sc_lock, "mpi_lock"); 219 task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc); 220 221 /* disable interrupts */ 222 mpi_write(sc, MPI_INTR_MASK, 223 MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL); 224 225 if (mpi_init(sc) != 0) { 226 printf("%s: unable to initialise\n", DEVNAME(sc)); 227 return (1); 228 } 229 230 if (mpi_iocfacts(sc) != 0) { 231 printf("%s: unable to get iocfacts\n", DEVNAME(sc)); 232 return (1); 233 } 234 235 if (mpi_alloc_ccbs(sc) != 0) { 236 /* error already printed */ 237 return (1); 238 } 239 240 if (mpi_alloc_replies(sc) != 0) { 241 printf("%s: unable to allocate reply space\n", DEVNAME(sc)); 242 goto free_ccbs; 243 } 244 245 if (mpi_iocinit(sc) != 0) { 246 printf("%s: unable to send iocinit\n", DEVNAME(sc)); 247 goto free_ccbs; 248 } 249 250 /* spin until we're operational */ 251 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 252 MPI_DOORBELL_STATE_OPER) != 0) { 253 printf("%s: state: 0x%08x\n", DEVNAME(sc), 254 mpi_read_db(sc) & MPI_DOORBELL_STATE); 255 printf("%s: operational state timeout\n", DEVNAME(sc)); 256 goto free_ccbs; 257 } 258 259 mpi_push_replies(sc); 260 261 if (mpi_portfacts(sc) != 0) { 262 printf("%s: unable to get portfacts\n", DEVNAME(sc)); 263 goto free_replies; 264 } 265 266 if (mpi_cfg_coalescing(sc) != 0) { 267 printf("%s: unable to configure coalescing\n", DEVNAME(sc)); 268 goto free_replies; 269 } 270 271 switch (sc->sc_porttype) { 272 case MPI_PORTFACTS_PORTTYPE_SAS: 273 SIMPLEQ_INIT(&sc->sc_evt_scan_queue); 274 mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO); 275 scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool, 276 mpi_evt_sas_detach, sc); 277 /* FALLTHROUGH */ 278 case MPI_PORTFACTS_PORTTYPE_FC: 279 if (mpi_eventnotify(sc) != 0) { 280 printf("%s: unable to enable events\n", DEVNAME(sc)); 281 goto free_replies; 282 } 283 break; 284 } 285 286 if (mpi_portenable(sc) != 0) { 287 printf("%s: unable to enable port\n", DEVNAME(sc)); 288 goto free_replies; 289 } 290 291 if (mpi_fwupload(sc) != 0) { 292 printf("%s: unable to upload firmware\n", DEVNAME(sc)); 293 goto free_replies; 294 } 295 296 if (mpi_manufacturing(sc) != 0) { 297 printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc)); 298 goto free_replies; 299 } 300 301 switch (sc->sc_porttype) { 302 case MPI_PORTFACTS_PORTTYPE_SCSI: 303 if (mpi_cfg_spi_port(sc) != 0) { 304 printf("%s: unable to configure spi\n", DEVNAME(sc)); 305 goto free_replies; 306 } 307 mpi_squash_ppr(sc); 308 break; 309 case MPI_PORTFACTS_PORTTYPE_SAS: 310 if (mpi_cfg_sas(sc) != 0) { 311 printf("%s: unable to configure sas\n", DEVNAME(sc)); 312 goto free_replies; 313 } 314 break; 315 case MPI_PORTFACTS_PORTTYPE_FC: 316 if (mpi_cfg_fc(sc) != 0) { 317 printf("%s: unable to configure fc\n", DEVNAME(sc)); 318 goto free_replies; 319 } 320 break; 321 } 322 323 /* get raid pages */ 324 mpi_get_raid(sc); 325 #if NBIO > 0 326 if (sc->sc_flags & MPI_F_RAID) { 327 if (bio_register(&sc->sc_dev, mpi_ioctl) != 0) 328 panic("%s: controller registration failed", 329 DEVNAME(sc)); 330 else { 331 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 332 2, 0, &sc->sc_cfg_hdr) != 0) { 333 panic("%s: can't get IOC page 2 hdr", 334 DEVNAME(sc)); 335 } 336 337 sc->sc_vol_page = 
			    mallocarray(sc->sc_cfg_hdr.page_length,
			    4, M_TEMP, M_WAITOK | M_CANFAIL);
			if (sc->sc_vol_page == NULL) {
				panic("%s: can't get memory for IOC page 2, "
				    "bio disabled", DEVNAME(sc));
			}

			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
			    sc->sc_vol_page,
			    sc->sc_cfg_hdr.page_length * 4) != 0) {
				panic("%s: can't get IOC page 2", DEVNAME(sc));
			}

			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
			    (sc->sc_vol_page + 1);

			sc->sc_ioctl = mpi_ioctl;
		}
	}
#endif /* NBIO > 0 */

	/* we should be good to go now, attach scsibus */
	sc->sc_link.adapter = &mpi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->sc_target;
	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
	sc->sc_link.openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16);
	sc->sc_link.pool = &sc->sc_iopool;

	memset(&saa, 0, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	/* config_found() returns the scsibus attached to us */
	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
	    &saa, scsiprint);

	/* do domain validation */
	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_run_ppr(sc);

	/* enable interrupts */
	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);

#if NBIO > 0
#ifndef SMALL_KERNEL
	mpi_create_sensors(sc);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

	return (0);

free_replies:
	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
	mpi_dmamem_free(sc, sc->sc_replies);
free_ccbs:
	while ((ccb = mpi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	mpi_dmamem_free(sc, sc->sc_requests);
	free(sc->sc_ccbs, M_DEVBUF, 0);

	return (1);
}

int
mpi_cfg_spi_port(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_port_pg1	port;

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
	    &hdr) != 0)
		return (1);

	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
		return (1);

	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
	DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
	DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
	    letoh32(port.on_bus_timer_value));
	DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
	    DEVNAME(sc), port.target_config, letoh16(port.id_config));

	if (port.port_scsi_id == sc->sc_target &&
	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
	    port.on_bus_timer_value != htole32(0x0))
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
	    sc->sc_target);
	port.port_scsi_id = sc->sc_target;
	port.port_resp_ids = htole16(1 << sc->sc_target);
	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */

	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

void
mpi_squash_ppr(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_spi_dev_pg1	page;
	int				i;

	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));

	for (i = 0; i < sc->sc_buswidth; i++) {
		if (mpi_cfg_header(sc,
MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 452 1, i, &hdr) != 0) 453 return; 454 455 if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0) 456 return; 457 458 DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x " 459 "req_offset: 0x%02x req_period: 0x%02x " 460 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i, 461 page.req_params1, page.req_offset, page.req_period, 462 page.req_params2, letoh32(page.configuration)); 463 464 page.req_params1 = 0x0; 465 page.req_offset = 0x0; 466 page.req_period = 0x0; 467 page.req_params2 = 0x0; 468 page.configuration = htole32(0x0); 469 470 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0) 471 return; 472 } 473 } 474 475 void 476 mpi_run_ppr(struct mpi_softc *sc) 477 { 478 struct mpi_cfg_hdr hdr; 479 struct mpi_cfg_spi_port_pg0 port_pg; 480 struct mpi_cfg_ioc_pg3 *physdisk_pg; 481 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk; 482 size_t pagelen; 483 struct scsi_link *link; 484 int i, tries; 485 486 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0, 487 &hdr) != 0) { 488 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n", 489 DEVNAME(sc)); 490 return; 491 } 492 493 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) { 494 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n", 495 DEVNAME(sc)); 496 return; 497 } 498 499 for (i = 0; i < sc->sc_buswidth; i++) { 500 link = scsi_get_link(sc->sc_scsibus, i, 0); 501 if (link == NULL) 502 continue; 503 504 /* do not ppr volumes */ 505 if (link->flags & SDEV_VIRTUAL) 506 continue; 507 508 tries = 0; 509 while (mpi_ppr(sc, link, NULL, port_pg.min_period, 510 port_pg.max_offset, tries) == EAGAIN) 511 tries++; 512 } 513 514 if ((sc->sc_flags & MPI_F_RAID) == 0) 515 return; 516 517 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0, 518 &hdr) != 0) { 519 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 520 "fetch ioc pg 3 header\n", DEVNAME(sc)); 521 return; 522 } 523 524 pagelen = hdr.page_length * 4; /* dwords to bytes */ 525 physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 526 if (physdisk_pg == NULL) { 527 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 528 "allocate ioc pg 3\n", DEVNAME(sc)); 529 return; 530 } 531 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1); 532 533 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) { 534 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to " 535 "fetch ioc page 3\n", DEVNAME(sc)); 536 goto out; 537 } 538 539 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc), 540 physdisk_pg->no_phys_disks); 541 542 for (i = 0; i < physdisk_pg->no_phys_disks; i++) { 543 physdisk = &physdisk_list[i]; 544 545 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d " 546 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id, 547 physdisk->phys_disk_bus, physdisk->phys_disk_ioc, 548 physdisk->phys_disk_num); 549 550 if (physdisk->phys_disk_ioc != sc->sc_ioc_number) 551 continue; 552 553 tries = 0; 554 while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period, 555 port_pg.max_offset, tries) == EAGAIN) 556 tries++; 557 } 558 559 out: 560 free(physdisk_pg, M_TEMP, pagelen); 561 } 562 563 int 564 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link, 565 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try) 566 { 567 struct mpi_cfg_hdr hdr0, hdr1; 568 struct mpi_cfg_spi_dev_pg0 pg0; 569 struct mpi_cfg_spi_dev_pg1 pg1; 570 u_int32_t address; 571 int id; 572 int raid = 0; 573 574 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr 
period: %d offset: %d try: %d " 575 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try, 576 link->quirks); 577 578 if (try >= 3) 579 return (EIO); 580 581 if (physdisk == NULL) { 582 if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR) 583 return (EIO); 584 585 address = link->target; 586 id = link->target; 587 } else { 588 raid = 1; 589 address = (physdisk->phys_disk_bus << 8) | 590 (physdisk->phys_disk_id); 591 id = physdisk->phys_disk_num; 592 } 593 594 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0, 595 address, &hdr0) != 0) { 596 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n", 597 DEVNAME(sc)); 598 return (EIO); 599 } 600 601 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1, 602 address, &hdr1) != 0) { 603 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n", 604 DEVNAME(sc)); 605 return (EIO); 606 } 607 608 #ifdef MPI_DEBUG 609 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 610 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n", 611 DEVNAME(sc)); 612 return (EIO); 613 } 614 615 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 616 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 617 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset, 618 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 619 #endif 620 621 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 622 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n", 623 DEVNAME(sc)); 624 return (EIO); 625 } 626 627 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 628 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 629 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 630 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 631 632 pg1.req_params1 = 0; 633 pg1.req_offset = offset; 634 pg1.req_period = period; 635 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH; 636 637 if (raid || !(link->quirks & SDEV_NOSYNC)) { 638 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE; 639 640 switch (try) { 641 case 0: /* U320 */ 642 break; 643 case 1: /* U160 */ 644 pg1.req_period = 0x09; 645 break; 646 case 2: /* U80 */ 647 pg1.req_period = 0x0a; 648 break; 649 } 650 651 if (pg1.req_period < 0x09) { 652 /* Ultra320: enable QAS & PACKETIZED */ 653 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS | 654 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED; 655 } 656 if (pg1.req_period < 0xa) { 657 /* >= Ultra160: enable dual xfers */ 658 pg1.req_params1 |= 659 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS; 660 } 661 } 662 663 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 664 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 665 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 666 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 667 668 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) { 669 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n", 670 DEVNAME(sc)); 671 return (EIO); 672 } 673 674 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 675 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n", 676 DEVNAME(sc)); 677 return (EIO); 678 } 679 680 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 681 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 682 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 683 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 684 685 if (mpi_inq(sc, id, raid) != 0) { 686 
DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against " 687 "target %d\n", DEVNAME(sc), link->target); 688 return (EIO); 689 } 690 691 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 692 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after " 693 "inquiry\n", DEVNAME(sc)); 694 return (EIO); 695 } 696 697 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 698 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 699 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset, 700 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 701 702 if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) { 703 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n", 704 DEVNAME(sc)); 705 return (EAGAIN); 706 } 707 708 if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) { 709 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n", 710 DEVNAME(sc)); 711 return (EAGAIN); 712 } 713 714 if (lemtoh32(&pg0.information) & 0x0e) { 715 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n", 716 DEVNAME(sc), lemtoh32(&pg0.information)); 717 return (EAGAIN); 718 } 719 720 switch(pg0.neg_period) { 721 case 0x08: 722 period = 160; 723 break; 724 case 0x09: 725 period = 80; 726 break; 727 case 0x0a: 728 period = 40; 729 break; 730 case 0x0b: 731 period = 20; 732 break; 733 case 0x0c: 734 period = 10; 735 break; 736 default: 737 period = 0; 738 break; 739 } 740 741 printf("%s: %s %d %s at %dMHz width %dbit offset %d " 742 "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target", 743 id, period ? "Sync" : "Async", period, 744 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8, 745 pg0.neg_offset, 746 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0, 747 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0, 748 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0); 749 750 return (0); 751 } 752 753 int 754 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk) 755 { 756 struct mpi_ccb *ccb; 757 struct scsi_inquiry inq; 758 struct inq_bundle { 759 struct mpi_msg_scsi_io io; 760 struct mpi_sge sge; 761 struct scsi_inquiry_data inqbuf; 762 struct scsi_sense_data sense; 763 } __packed *bundle; 764 struct mpi_msg_scsi_io *io; 765 struct mpi_sge *sge; 766 767 DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc)); 768 769 memset(&inq, 0, sizeof(inq)); 770 inq.opcode = INQUIRY; 771 _lto2b(sizeof(struct scsi_inquiry_data), inq.length); 772 773 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 774 if (ccb == NULL) 775 return (1); 776 777 ccb->ccb_done = mpi_empty_done; 778 779 bundle = ccb->ccb_cmd; 780 io = &bundle->io; 781 sge = &bundle->sge; 782 783 io->function = physdisk ? 
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH : 784 MPI_FUNCTION_SCSI_IO_REQUEST; 785 /* 786 * bus is always 0 787 * io->bus = htole16(sc->sc_bus); 788 */ 789 io->target_id = target; 790 791 io->cdb_length = sizeof(inq); 792 io->sense_buf_len = sizeof(struct scsi_sense_data); 793 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 794 795 /* 796 * always lun 0 797 * io->lun[0] = htobe16(link->lun); 798 */ 799 800 io->direction = MPI_SCSIIO_DIR_READ; 801 io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT; 802 803 memcpy(io->cdb, &inq, sizeof(inq)); 804 805 htolem32(&io->data_length, sizeof(struct scsi_inquiry_data)); 806 807 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva + 808 offsetof(struct inq_bundle, sense)); 809 810 htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 | 811 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 812 (u_int32_t)sizeof(inq)); 813 814 mpi_dvatosge(sge, ccb->ccb_cmd_dva + 815 offsetof(struct inq_bundle, inqbuf)); 816 817 if (mpi_poll(sc, ccb, 5000) != 0) 818 return (1); 819 820 if (ccb->ccb_rcb != NULL) 821 mpi_push_reply(sc, ccb->ccb_rcb); 822 823 scsi_io_put(&sc->sc_iopool, ccb); 824 825 return (0); 826 } 827 828 int 829 mpi_cfg_sas(struct mpi_softc *sc) 830 { 831 struct mpi_ecfg_hdr ehdr; 832 struct mpi_cfg_sas_iou_pg1 *pg; 833 size_t pagelen; 834 int rv = 0; 835 836 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0, 837 &ehdr) != 0) 838 return (0); 839 840 pagelen = lemtoh16(&ehdr.ext_page_length) * 4; 841 pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO); 842 if (pg == NULL) 843 return (ENOMEM); 844 845 if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0) 846 goto out; 847 848 if (pg->max_sata_q_depth != 32) { 849 pg->max_sata_q_depth = 32; 850 851 if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0) 852 goto out; 853 } 854 855 out: 856 free(pg, M_TEMP, pagelen); 857 return (rv); 858 } 859 860 int 861 mpi_cfg_fc(struct mpi_softc *sc) 862 { 863 struct mpi_cfg_hdr hdr; 864 struct mpi_cfg_fc_port_pg0 pg0; 865 struct mpi_cfg_fc_port_pg1 pg1; 866 867 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0, 868 &hdr) != 0) { 869 printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc)); 870 return (1); 871 } 872 873 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) { 874 printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc)); 875 return (1); 876 } 877 878 sc->sc_link.port_wwn = letoh64(pg0.wwpn); 879 sc->sc_link.node_wwn = letoh64(pg0.wwnn); 880 881 /* configure port config more to our liking */ 882 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0, 883 &hdr) != 0) { 884 printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc)); 885 return (1); 886 } 887 888 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) { 889 printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc)); 890 return (1); 891 } 892 893 SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR | 894 MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN)); 895 896 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) { 897 printf("%s: unable to set FC port page 1\n", DEVNAME(sc)); 898 return (1); 899 } 900 901 return (0); 902 } 903 904 void 905 mpi_detach(struct mpi_softc *sc) 906 { 907 908 } 909 910 int 911 mpi_intr(void *arg) 912 { 913 struct mpi_softc *sc = arg; 914 u_int32_t reg; 915 int rv = 0; 916 917 if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0) 918 return (rv); 919 920 while ((reg = mpi_pop_reply(sc)) != 0xffffffff) { 921 mpi_reply(sc, reg); 922 rv = 1; 923 } 924 925 return (rv); 926 } 927 928 void 929 
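/*
 * Descriptive comment (a sketch based on the handling below, not new
 * behaviour): a value popped from MPI_REPLY_QUEUE either carries the ccb id
 * directly (a "context" reply) or, when MPI_REPLY_QUEUE_ADDRESS is set, the
 * DMA address of a reply frame shifted right by one bit.  Roughly:
 *
 *	reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
 *	rcb = &sc->sc_rcbs[(reply_dva - MPI_DMA_DVA(sc->sc_replies)) /
 *	    MPI_REPLY_SIZE];
 *
 * and the ccb id is then taken from the reply frame's msg_context.
 */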
mpi_reply(struct mpi_softc *sc, u_int32_t reg)
{
	struct mpi_ccb		*ccb;
	struct mpi_rcb		*rcb = NULL;
	struct mpi_msg_reply	*reply = NULL;
	u_int32_t		reply_dva;
	int			id;
	int			i;

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);

	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
		    MPI_REPLY_SIZE;
		rcb = &sc->sc_rcbs[i];

		bus_dmamap_sync(sc->sc_dmat,
		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);

		reply = rcb->rcb_reply;

		id = lemtoh32(&reply->msg_context);
	} else {
		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
		case MPI_REPLY_QUEUE_TYPE_INIT:
			id = reg & MPI_REPLY_QUEUE_CONTEXT;
			break;

		default:
			panic("%s: unsupported context reply",
			    DEVNAME(sc));
		}
	}

	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
	    DEVNAME(sc), id, reply);

	ccb = &sc->sc_ccbs[id];

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	ccb->ccb_state = MPI_CCB_READY;
	ccb->ccb_rcb = rcb;

	ccb->ccb_done(ccb);
}

struct mpi_dmamem *
mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
{
	struct mpi_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, mdm->mdm_seg,
	    mdm->mdm_kva);

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}

void
mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
{
	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}

int
mpi_alloc_ccbs(struct mpi_softc *sc)
{
	struct mpi_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SLIST_INIT(&sc->sc_ccb_free);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);

	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests =
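	/*
	 * Descriptive comment only (a sketch of the layout set up by the
	 * loop below, not new behaviour): the request frames live in a
	 * single DMA-safe allocation of MPI_REQUEST_SIZE * sc_maxcmds bytes,
	 * one fixed slot per ccb, so ccb_cmd and ccb_cmd_dva are simply the
	 * kva/dva of this memory plus ccb_offset.
	 */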
mpi_dmamem_alloc(sc, 1055 MPI_REQUEST_SIZE * sc->sc_maxcmds); 1056 if (sc->sc_requests == NULL) { 1057 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)); 1058 goto free_ccbs; 1059 } 1060 cmd = MPI_DMA_KVA(sc->sc_requests); 1061 memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds); 1062 1063 for (i = 0; i < sc->sc_maxcmds; i++) { 1064 ccb = &sc->sc_ccbs[i]; 1065 1066 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 1067 sc->sc_max_sgl_len, MAXPHYS, 0, 1068 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1069 &ccb->ccb_dmamap) != 0) { 1070 printf("%s: unable to create dma map\n", DEVNAME(sc)); 1071 goto free_maps; 1072 } 1073 1074 ccb->ccb_sc = sc; 1075 ccb->ccb_id = i; 1076 ccb->ccb_offset = MPI_REQUEST_SIZE * i; 1077 ccb->ccb_state = MPI_CCB_READY; 1078 1079 ccb->ccb_cmd = &cmd[ccb->ccb_offset]; 1080 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) + 1081 ccb->ccb_offset; 1082 1083 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x " 1084 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n", 1085 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc, 1086 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd, 1087 ccb->ccb_cmd_dva); 1088 1089 mpi_put_ccb(sc, ccb); 1090 } 1091 1092 scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb); 1093 1094 return (0); 1095 1096 free_maps: 1097 while ((ccb = mpi_get_ccb(sc)) != NULL) 1098 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 1099 1100 mpi_dmamem_free(sc, sc->sc_requests); 1101 free_ccbs: 1102 free(sc->sc_ccbs, M_DEVBUF, 0); 1103 1104 return (1); 1105 } 1106 1107 void * 1108 mpi_get_ccb(void *xsc) 1109 { 1110 struct mpi_softc *sc = xsc; 1111 struct mpi_ccb *ccb; 1112 1113 mtx_enter(&sc->sc_ccb_mtx); 1114 ccb = SLIST_FIRST(&sc->sc_ccb_free); 1115 if (ccb != NULL) { 1116 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link); 1117 ccb->ccb_state = MPI_CCB_READY; 1118 } 1119 mtx_leave(&sc->sc_ccb_mtx); 1120 1121 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb); 1122 1123 return (ccb); 1124 } 1125 1126 void 1127 mpi_put_ccb(void *xsc, void *io) 1128 { 1129 struct mpi_softc *sc = xsc; 1130 struct mpi_ccb *ccb = io; 1131 1132 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb); 1133 1134 #ifdef DIAGNOSTIC 1135 if (ccb->ccb_state == MPI_CCB_FREE) 1136 panic("mpi_put_ccb: double free"); 1137 #endif 1138 1139 ccb->ccb_state = MPI_CCB_FREE; 1140 ccb->ccb_cookie = NULL; 1141 ccb->ccb_done = NULL; 1142 memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE); 1143 mtx_enter(&sc->sc_ccb_mtx); 1144 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link); 1145 mtx_leave(&sc->sc_ccb_mtx); 1146 } 1147 1148 int 1149 mpi_alloc_replies(struct mpi_softc *sc) 1150 { 1151 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc)); 1152 1153 sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF, 1154 M_WAITOK|M_CANFAIL); 1155 if (sc->sc_rcbs == NULL) 1156 return (1); 1157 1158 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE); 1159 if (sc->sc_replies == NULL) { 1160 free(sc->sc_rcbs, M_DEVBUF, 0); 1161 return (1); 1162 } 1163 1164 return (0); 1165 } 1166 1167 void 1168 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb) 1169 { 1170 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 1171 rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1172 mpi_push_reply_db(sc, rcb->rcb_reply_dva); 1173 } 1174 1175 void 1176 mpi_push_replies(struct mpi_softc *sc) 1177 { 1178 struct mpi_rcb *rcb; 1179 char *kva = MPI_DMA_KVA(sc->sc_replies); 1180 int i; 1181 1182 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0, 1183 
	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_repq; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
		rcb->rcb_offset = MPI_REPLY_SIZE * i;
		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
		    MPI_REPLY_SIZE * i;
		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
	}
}

void
mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mpi_msg_request	*msg;

	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	msg = ccb->ccb_cmd;
	htolem32(&msg->msg_context, ccb->ccb_id);

	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, MPI_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPI_CCB_QUEUED;
	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
}

int
mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
{
	void			(*done)(struct mpi_ccb *);
	void			*cookie;
	int			rv = 1;
	u_int32_t		reg;

	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
	    timeout);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpi_poll_done;
	ccb->ccb_cookie = &rv;

	mpi_start(sc, ccb);
	while (rv == 1) {
		reg = mpi_pop_reply(sc);
		if (reg == 0xffffffff) {
			if (timeout-- == 0) {
				printf("%s: timeout\n", DEVNAME(sc));
				goto timeout;
			}

			delay(1000);
			continue;
		}

		mpi_reply(sc, reg);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

timeout:
	return (rv);
}

void
mpi_poll_done(struct mpi_ccb *ccb)
{
	int			*rv = ccb->ccb_cookie;

	*rv = 0;
}

void
mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
{
	struct mutex		cookie = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpi_ccb *);

	done = ccb->ccb_done;
	ccb->ccb_done = mpi_wait_done;
	ccb->ccb_cookie = &cookie;

	/* XXX this will wait forever for the ccb to complete */

	mpi_start(sc, ccb);

	mtx_enter(&cookie);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &cookie, PRIBIO, "mpiwait", INFSLP);
	mtx_leave(&cookie);

	done(ccb);
}

void
mpi_wait_done(struct mpi_ccb *ccb)
{
	struct mutex		*cookie = ccb->ccb_cookie;

	mtx_enter(cookie);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(cookie);
}

void
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpi_softc	*sc = link->adapter_softc;
	struct mpi_ccb		*ccb;
	struct mpi_ccb_bundle	*mcb;
	struct mpi_msg_scsi_io	*io;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	KERNEL_UNLOCK();

	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		goto done;
	}

	ccb = xs->io;

	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

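	/*
	 * Descriptive comment (not new behaviour): the ccb's request frame
	 * is used as a bundle here, with the mpi_msg_scsi_io header, the SG
	 * list and the sense buffer all living in the same MPI_REQUEST_SIZE
	 * slot, so the sense address and SGEs below are expressed as
	 * ccb_cmd_dva plus offsetof(struct mpi_ccb_bundle, ...).  See
	 * mpi_load_xs() for how the SGL and any chain elements are filled in.
	 */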
mcb = ccb->ccb_cmd; 1330 io = &mcb->mcb_io; 1331 1332 io->function = MPI_FUNCTION_SCSI_IO_REQUEST; 1333 /* 1334 * bus is always 0 1335 * io->bus = htole16(sc->sc_bus); 1336 */ 1337 io->target_id = link->target; 1338 1339 io->cdb_length = xs->cmdlen; 1340 io->sense_buf_len = sizeof(xs->sense); 1341 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 1342 1343 htobem16(&io->lun[0], link->lun); 1344 1345 switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) { 1346 case SCSI_DATA_IN: 1347 io->direction = MPI_SCSIIO_DIR_READ; 1348 break; 1349 case SCSI_DATA_OUT: 1350 io->direction = MPI_SCSIIO_DIR_WRITE; 1351 break; 1352 default: 1353 io->direction = MPI_SCSIIO_DIR_NONE; 1354 break; 1355 } 1356 1357 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI && 1358 (link->quirks & SDEV_NOTAGS)) 1359 io->tagging = MPI_SCSIIO_ATTR_UNTAGGED; 1360 else 1361 io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q; 1362 1363 memcpy(io->cdb, xs->cmd, xs->cmdlen); 1364 1365 htolem32(&io->data_length, xs->datalen); 1366 1367 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva + 1368 offsetof(struct mpi_ccb_bundle, mcb_sense)); 1369 1370 if (mpi_load_xs(ccb) != 0) 1371 goto stuffup; 1372 1373 timeout_set(&xs->stimeout, mpi_timeout_xs, ccb); 1374 1375 if (xs->flags & SCSI_POLL) { 1376 if (mpi_poll(sc, ccb, xs->timeout) != 0) 1377 goto stuffup; 1378 } else 1379 mpi_start(sc, ccb); 1380 1381 KERNEL_LOCK(); 1382 return; 1383 1384 stuffup: 1385 xs->error = XS_DRIVER_STUFFUP; 1386 done: 1387 KERNEL_LOCK(); 1388 scsi_done(xs); 1389 } 1390 1391 void 1392 mpi_scsi_cmd_done(struct mpi_ccb *ccb) 1393 { 1394 struct mpi_softc *sc = ccb->ccb_sc; 1395 struct scsi_xfer *xs = ccb->ccb_cookie; 1396 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1397 bus_dmamap_t dmap = ccb->ccb_dmamap; 1398 struct mpi_msg_scsi_io_error *sie; 1399 1400 if (xs->datalen != 0) { 1401 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1402 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_POSTREAD : 1403 BUS_DMASYNC_POSTWRITE); 1404 1405 bus_dmamap_unload(sc->sc_dmat, dmap); 1406 } 1407 1408 /* timeout_del */ 1409 xs->error = XS_NOERROR; 1410 xs->resid = 0; 1411 1412 if (ccb->ccb_rcb == NULL) { 1413 /* no scsi error, we're ok so drop out early */ 1414 xs->status = SCSI_OK; 1415 KERNEL_LOCK(); 1416 scsi_done(xs); 1417 KERNEL_UNLOCK(); 1418 return; 1419 } 1420 1421 sie = ccb->ccb_rcb->rcb_reply; 1422 1423 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d " 1424 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen, 1425 xs->flags); 1426 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d " 1427 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus, 1428 sie->msg_length, sie->function); 1429 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d " 1430 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length, 1431 sie->sense_buf_len, sie->msg_flags); 1432 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1433 letoh32(sie->msg_context)); 1434 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x " 1435 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status, 1436 sie->scsi_state, letoh16(sie->ioc_status)); 1437 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1438 letoh32(sie->ioc_loginfo)); 1439 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc), 1440 letoh32(sie->transfer_count)); 1441 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc), 1442 letoh32(sie->sense_count)); 1443 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc), 1444 letoh32(sie->response_info)); 1445 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc), 1446 letoh16(sie->tag)); 1447 1448 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) 1449 xs->status = SCSI_TERMINATED; 1450 else 1451 xs->status = sie->scsi_status; 1452 xs->resid = 0; 1453 1454 switch (lemtoh16(&sie->ioc_status)) { 1455 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 1456 xs->resid = xs->datalen - lemtoh32(&sie->transfer_count); 1457 /* FALLTHROUGH */ 1458 case MPI_IOCSTATUS_SUCCESS: 1459 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 1460 switch (xs->status) { 1461 case SCSI_OK: 1462 xs->error = XS_NOERROR; 1463 break; 1464 1465 case SCSI_CHECK: 1466 xs->error = XS_SENSE; 1467 break; 1468 1469 case SCSI_BUSY: 1470 case SCSI_QUEUE_FULL: 1471 xs->error = XS_BUSY; 1472 break; 1473 1474 default: 1475 xs->error = XS_DRIVER_STUFFUP; 1476 break; 1477 } 1478 break; 1479 1480 case MPI_IOCSTATUS_BUSY: 1481 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 1482 xs->error = XS_BUSY; 1483 break; 1484 1485 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 1486 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 1487 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 1488 xs->error = XS_SELTIMEOUT; 1489 break; 1490 1491 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 1492 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 1493 xs->error = XS_RESET; 1494 break; 1495 1496 default: 1497 xs->error = XS_DRIVER_STUFFUP; 1498 break; 1499 } 1500 1501 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID) 1502 memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense)); 1503 1504 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc), 1505 xs->error, xs->status); 1506 1507 mpi_push_reply(sc, ccb->ccb_rcb); 1508 KERNEL_LOCK(); 1509 scsi_done(xs); 1510 KERNEL_UNLOCK(); 1511 } 1512 1513 void 1514 mpi_timeout_xs(void *arg) 1515 { 1516 /* XXX */ 1517 } 1518 1519 int 1520 mpi_load_xs(struct mpi_ccb *ccb) 1521 { 1522 struct mpi_softc *sc = ccb->ccb_sc; 1523 struct scsi_xfer *xs = ccb->ccb_cookie; 1524 struct 
mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1525 struct mpi_msg_scsi_io *io = &mcb->mcb_io; 1526 struct mpi_sge *sge = NULL; 1527 struct mpi_sge *nsge = &mcb->mcb_sgl[0]; 1528 struct mpi_sge *ce = NULL, *nce; 1529 bus_dmamap_t dmap = ccb->ccb_dmamap; 1530 u_int32_t addr, flags; 1531 int i, error; 1532 1533 if (xs->datalen == 0) { 1534 htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 1535 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 1536 return (0); 1537 } 1538 1539 error = bus_dmamap_load(sc->sc_dmat, dmap, 1540 xs->data, xs->datalen, NULL, BUS_DMA_STREAMING | 1541 ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)); 1542 if (error) { 1543 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 1544 return (1); 1545 } 1546 1547 flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64; 1548 if (xs->flags & SCSI_DATA_OUT) 1549 flags |= MPI_SGE_FL_DIR_OUT; 1550 1551 if (dmap->dm_nsegs > sc->sc_first_sgl_len) { 1552 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1]; 1553 io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io; 1554 } 1555 1556 for (i = 0; i < dmap->dm_nsegs; i++) { 1557 1558 if (nsge == ce) { 1559 nsge++; 1560 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST); 1561 1562 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) { 1563 nce = &nsge[sc->sc_chain_len - 1]; 1564 addr = (u_int32_t *)nce - (u_int32_t *)nsge; 1565 addr = addr << 16 | 1566 sizeof(struct mpi_sge) * sc->sc_chain_len; 1567 } else { 1568 nce = NULL; 1569 addr = sizeof(struct mpi_sge) * 1570 (dmap->dm_nsegs - i); 1571 } 1572 1573 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN | 1574 MPI_SGE_FL_SIZE_64 | addr); 1575 1576 mpi_dvatosge(ce, ccb->ccb_cmd_dva + 1577 ((u_int8_t *)nsge - (u_int8_t *)mcb)); 1578 1579 ce = nce; 1580 } 1581 1582 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc), 1583 i, dmap->dm_segs[i].ds_len, 1584 (u_int64_t)dmap->dm_segs[i].ds_addr); 1585 1586 sge = nsge++; 1587 1588 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len); 1589 mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr); 1590 } 1591 1592 /* terminate list */ 1593 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 1594 MPI_SGE_FL_EOL); 1595 1596 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1597 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_PREREAD : 1598 BUS_DMASYNC_PREWRITE); 1599 1600 return (0); 1601 } 1602 1603 int 1604 mpi_scsi_probe_virtual(struct scsi_link *link) 1605 { 1606 struct mpi_softc *sc = link->adapter_softc; 1607 struct mpi_cfg_hdr hdr; 1608 struct mpi_cfg_raid_vol_pg0 *rp0; 1609 int len; 1610 int rv; 1611 1612 if (!ISSET(sc->sc_flags, MPI_F_RAID)) 1613 return (0); 1614 1615 if (link->lun > 0) 1616 return (0); 1617 1618 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 1619 0, link->target, MPI_PG_POLL, &hdr); 1620 if (rv != 0) 1621 return (0); 1622 1623 len = hdr.page_length * 4; 1624 rp0 = malloc(len, M_TEMP, M_NOWAIT); 1625 if (rp0 == NULL) 1626 return (ENOMEM); 1627 1628 rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len); 1629 if (rv == 0) 1630 SET(link->flags, SDEV_VIRTUAL); 1631 1632 free(rp0, M_TEMP, len); 1633 return (0); 1634 } 1635 1636 int 1637 mpi_scsi_probe(struct scsi_link *link) 1638 { 1639 struct mpi_softc *sc = link->adapter_softc; 1640 struct mpi_ecfg_hdr ehdr; 1641 struct mpi_cfg_sas_dev_pg0 pg0; 1642 u_int32_t address; 1643 int rv; 1644 1645 rv = mpi_scsi_probe_virtual(link); 1646 if (rv != 0) 1647 return (rv); 1648 1649 if (ISSET(link->flags, SDEV_VIRTUAL)) 1650 return (0); 1651 1652 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS) 1653 return (0); 1654 1655 address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target; 1656 1657 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0, 1658 address, &ehdr) != 0) 1659 return (EIO); 1660 1661 if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0) 1662 return (0); 1663 1664 DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n", 1665 DEVNAME(sc), link->target); 1666 DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n", 1667 DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle)); 1668 DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc), 1669 letoh64(pg0.sas_addr)); 1670 DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x " 1671 "access_status: 0x%02x\n", DEVNAME(sc), 1672 letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status); 1673 DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x " 1674 "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc), 1675 letoh16(pg0.dev_handle), pg0.bus, pg0.target); 1676 DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc), 1677 letoh32(pg0.device_info)); 1678 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n", 1679 DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port); 1680 1681 if (ISSET(lemtoh32(&pg0.device_info), 1682 MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) { 1683 DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n", 1684 DEVNAME(sc), link->target); 1685 link->flags |= SDEV_ATAPI; 1686 link->quirks |= SDEV_ONLYBIG; 1687 } 1688 1689 return (0); 1690 } 1691 1692 u_int32_t 1693 mpi_read(struct mpi_softc *sc, bus_size_t r) 1694 { 1695 u_int32_t rv; 1696 1697 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1698 BUS_SPACE_BARRIER_READ); 1699 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 1700 1701 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv); 1702 1703 return (rv); 1704 } 1705 1706 void 1707 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v) 1708 { 1709 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v); 1710 1711 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 1712 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1713 BUS_SPACE_BARRIER_WRITE); 1714 } 1715 1716 int 1717 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1718 
u_int32_t target) 1719 { 1720 int i; 1721 1722 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r, 1723 mask, target); 1724 1725 for (i = 0; i < 10000; i++) { 1726 if ((mpi_read(sc, r) & mask) == target) 1727 return (0); 1728 delay(1000); 1729 } 1730 1731 return (1); 1732 } 1733 1734 int 1735 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1736 u_int32_t target) 1737 { 1738 int i; 1739 1740 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r, 1741 mask, target); 1742 1743 for (i = 0; i < 10000; i++) { 1744 if ((mpi_read(sc, r) & mask) != target) 1745 return (0); 1746 delay(1000); 1747 } 1748 1749 return (1); 1750 } 1751 1752 int 1753 mpi_init(struct mpi_softc *sc) 1754 { 1755 u_int32_t db; 1756 int i; 1757 1758 /* spin until the IOC leaves the RESET state */ 1759 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1760 MPI_DOORBELL_STATE_RESET) != 0) { 1761 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave " 1762 "reset state\n", DEVNAME(sc)); 1763 return (1); 1764 } 1765 1766 /* check current ownership */ 1767 db = mpi_read_db(sc); 1768 if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) { 1769 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n", 1770 DEVNAME(sc)); 1771 return (0); 1772 } 1773 1774 for (i = 0; i < 5; i++) { 1775 switch (db & MPI_DOORBELL_STATE) { 1776 case MPI_DOORBELL_STATE_READY: 1777 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n", 1778 DEVNAME(sc)); 1779 return (0); 1780 1781 case MPI_DOORBELL_STATE_OPER: 1782 case MPI_DOORBELL_STATE_FAULT: 1783 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being " 1784 "reset\n" , DEVNAME(sc)); 1785 if (mpi_reset_soft(sc) != 0) 1786 mpi_reset_hard(sc); 1787 break; 1788 1789 case MPI_DOORBELL_STATE_RESET: 1790 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come " 1791 "out of reset\n", DEVNAME(sc)); 1792 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1793 MPI_DOORBELL_STATE_RESET) != 0) 1794 return (1); 1795 break; 1796 } 1797 db = mpi_read_db(sc); 1798 } 1799 1800 return (1); 1801 } 1802 1803 int 1804 mpi_reset_soft(struct mpi_softc *sc) 1805 { 1806 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc)); 1807 1808 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1809 return (1); 1810 1811 mpi_write_db(sc, 1812 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)); 1813 if (mpi_wait_eq(sc, MPI_INTR_STATUS, 1814 MPI_INTR_STATUS_IOCDOORBELL, 0) != 0) 1815 return (1); 1816 1817 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1818 MPI_DOORBELL_STATE_READY) != 0) 1819 return (1); 1820 1821 return (0); 1822 } 1823 1824 int 1825 mpi_reset_hard(struct mpi_softc *sc) 1826 { 1827 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc)); 1828 1829 /* enable diagnostic register */ 1830 mpi_write(sc, MPI_WRITESEQ, 0xff); 1831 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1); 1832 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2); 1833 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3); 1834 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4); 1835 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5); 1836 1837 /* reset ioc */ 1838 mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER); 1839 1840 delay(10000); 1841 1842 /* disable diagnostic register */ 1843 mpi_write(sc, MPI_WRITESEQ, 0xff); 1844 1845 /* restore pci bits? */ 1846 1847 /* firmware bits? */ 1848 return (0); 1849 } 1850 1851 int 1852 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords) 1853 { 1854 u_int32_t *query = buf; 1855 int i; 1856 1857 /* make sure the doorbell is not in use. 
*/ 1858 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1859 return (1); 1860 1861 /* clear pending doorbell interrupts */ 1862 if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL) 1863 mpi_write_intr(sc, 0); 1864 1865 /* 1866 * first write the doorbell with the handshake function and the 1867 * dword count. 1868 */ 1869 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) | 1870 MPI_DOORBELL_DWORDS(dwords)); 1871 1872 /* 1873 * the doorbell used bit will be set because a doorbell function has 1874 * started. Wait for the interrupt and then ack it. 1875 */ 1876 if (mpi_wait_db_int(sc) != 0) 1877 return (1); 1878 mpi_write_intr(sc, 0); 1879 1880 /* poll for the acknowledgement. */ 1881 if (mpi_wait_db_ack(sc) != 0) 1882 return (1); 1883 1884 /* write the query through the doorbell. */ 1885 for (i = 0; i < dwords; i++) { 1886 mpi_write_db(sc, htole32(query[i])); 1887 if (mpi_wait_db_ack(sc) != 0) 1888 return (1); 1889 } 1890 1891 return (0); 1892 } 1893 1894 int 1895 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword) 1896 { 1897 u_int16_t *words = (u_int16_t *)dword; 1898 int i; 1899 1900 for (i = 0; i < 2; i++) { 1901 if (mpi_wait_db_int(sc) != 0) 1902 return (1); 1903 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK); 1904 mpi_write_intr(sc, 0); 1905 } 1906 1907 return (0); 1908 } 1909 1910 int 1911 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords) 1912 { 1913 struct mpi_msg_reply *reply = buf; 1914 u_int32_t *dbuf = buf, dummy; 1915 int i; 1916 1917 /* get the first dword so we can read the length out of the header. */ 1918 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0) 1919 return (1); 1920 1921 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n", 1922 DEVNAME(sc), dwords, reply->msg_length); 1923 1924 /* 1925 * the total length, in dwords, is in the message length field of the 1926 * reply header. 
1927 */ 1928 for (i = 1; i < MIN(dwords, reply->msg_length); i++) { 1929 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0) 1930 return (1); 1931 } 1932 1933 /* if there's extra stuff to come off the ioc, discard it */ 1934 while (i++ < reply->msg_length) { 1935 if (mpi_handshake_recv_dword(sc, &dummy) != 0) 1936 return (1); 1937 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: " 1938 "0x%08x\n", DEVNAME(sc), dummy); 1939 } 1940 1941 /* wait for the doorbell used bit to be reset and clear the intr */ 1942 if (mpi_wait_db_int(sc) != 0) 1943 return (1); 1944 mpi_write_intr(sc, 0); 1945 1946 return (0); 1947 } 1948 1949 void 1950 mpi_empty_done(struct mpi_ccb *ccb) 1951 { 1952 /* nothing to do */ 1953 } 1954 1955 int 1956 mpi_iocfacts(struct mpi_softc *sc) 1957 { 1958 struct mpi_msg_iocfacts_request ifq; 1959 struct mpi_msg_iocfacts_reply ifp; 1960 1961 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc)); 1962 1963 memset(&ifq, 0, sizeof(ifq)); 1964 memset(&ifp, 0, sizeof(ifp)); 1965 1966 ifq.function = MPI_FUNCTION_IOC_FACTS; 1967 ifq.chain_offset = 0; 1968 ifq.msg_flags = 0; 1969 ifq.msg_context = htole32(0xdeadbeef); 1970 1971 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 1972 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n", 1973 DEVNAME(sc)); 1974 return (1); 1975 } 1976 1977 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 1978 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n", 1979 DEVNAME(sc)); 1980 return (1); 1981 } 1982 1983 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n", 1984 DEVNAME(sc), ifp.function, ifp.msg_length, 1985 ifp.msg_version_maj, ifp.msg_version_min); 1986 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x " 1987 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags, 1988 ifp.ioc_number, ifp.header_version_maj, 1989 ifp.header_version_min); 1990 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc), 1991 letoh32(ifp.msg_context)); 1992 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n", 1993 DEVNAME(sc), letoh16(ifp.ioc_status), 1994 letoh16(ifp.ioc_exceptions)); 1995 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc), 1996 letoh32(ifp.ioc_loginfo)); 1997 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x " 1998 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags, 1999 ifp.block_size, ifp.whoinit, ifp.max_chain_depth); 2000 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n", 2001 DEVNAME(sc), letoh16(ifp.request_frame_size), 2002 letoh16(ifp.reply_queue_depth)); 2003 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc), 2004 letoh16(ifp.product_id)); 2005 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc), 2006 letoh32(ifp.current_host_mfa_hi_addr)); 2007 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d " 2008 "global_credits: %d\n", 2009 DEVNAME(sc), ifp.event_state, ifp.number_of_ports, 2010 letoh16(ifp.global_credits)); 2011 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc), 2012 letoh32(ifp.current_sense_buffer_hi_addr)); 2013 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n", 2014 DEVNAME(sc), ifp.max_buses, ifp.max_devices, 2015 letoh16(ifp.current_reply_frame_size)); 2016 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc), 2017 letoh32(ifp.fw_image_size)); 2018 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc), 2019 letoh32(ifp.ioc_capabilities)); 2020 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x " 2021 "fw_version_dev: 0x%02x\n", 
DEVNAME(sc), 2022 ifp.fw_version_maj, ifp.fw_version_min, 2023 ifp.fw_version_unit, ifp.fw_version_dev); 2024 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n", 2025 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth)); 2026 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x " 2027 "addr 0x%08lx%08lx\n", DEVNAME(sc), 2028 letoh32(ifp.host_page_buffer_sge.sg_hdr), 2029 letoh32(ifp.host_page_buffer_sge.sg_addr_hi), 2030 letoh32(ifp.host_page_buffer_sge.sg_addr_lo)); 2031 2032 sc->sc_fw_maj = ifp.fw_version_maj; 2033 sc->sc_fw_min = ifp.fw_version_min; 2034 sc->sc_fw_unit = ifp.fw_version_unit; 2035 sc->sc_fw_dev = ifp.fw_version_dev; 2036 2037 sc->sc_maxcmds = lemtoh16(&ifp.global_credits); 2038 sc->sc_maxchdepth = ifp.max_chain_depth; 2039 sc->sc_ioc_number = ifp.ioc_number; 2040 if (sc->sc_flags & MPI_F_SPI) 2041 sc->sc_buswidth = 16; 2042 else 2043 sc->sc_buswidth = 2044 (ifp.max_devices == 0) ? 256 : ifp.max_devices; 2045 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 2046 sc->sc_fw_len = lemtoh32(&ifp.fw_image_size); 2047 2048 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth)); 2049 2050 /* 2051 * you can fit sg elements on the end of the io cmd if they fit in the 2052 * request frame size. 2053 */ 2054 sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) - 2055 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge); 2056 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc), 2057 sc->sc_first_sgl_len); 2058 2059 sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) / 2060 sizeof(struct mpi_sge); 2061 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc), 2062 sc->sc_chain_len); 2063 2064 /* the sgl tailing the io cmd loses an entry to the chain element. */ 2065 sc->sc_max_sgl_len = MPI_MAX_SGL - 1; 2066 /* the sgl chains lose an entry for each chain element */ 2067 sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) / 2068 sc->sc_chain_len; 2069 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc), 2070 sc->sc_max_sgl_len); 2071 2072 /* XXX we're ignoring the max chain depth */ 2073 2074 return (0); 2075 } 2076 2077 int 2078 mpi_iocinit(struct mpi_softc *sc) 2079 { 2080 struct mpi_msg_iocinit_request iiq; 2081 struct mpi_msg_iocinit_reply iip; 2082 u_int32_t hi_addr; 2083 2084 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc)); 2085 2086 memset(&iiq, 0, sizeof(iiq)); 2087 memset(&iip, 0, sizeof(iip)); 2088 2089 iiq.function = MPI_FUNCTION_IOC_INIT; 2090 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER; 2091 2092 iiq.max_devices = (sc->sc_buswidth == 256) ? 
0 : sc->sc_buswidth; 2093 iiq.max_buses = 1; 2094 2095 iiq.msg_context = htole32(0xd00fd00f); 2096 2097 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE); 2098 2099 hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32); 2100 htolem32(&iiq.host_mfa_hi_addr, hi_addr); 2101 htolem32(&iiq.sense_buffer_hi_addr, hi_addr); 2102 2103 iiq.msg_version_maj = 0x01; 2104 iiq.msg_version_min = 0x02; 2105 2106 iiq.hdr_version_unit = 0x0d; 2107 iiq.hdr_version_dev = 0x00; 2108 2109 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 2110 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n", 2111 DEVNAME(sc)); 2112 return (1); 2113 } 2114 2115 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 2116 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n", 2117 DEVNAME(sc)); 2118 return (1); 2119 } 2120 2121 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d " 2122 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 2123 iip.msg_length, iip.whoinit); 2124 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d " 2125 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags, 2126 iip.max_buses, iip.max_devices, iip.flags); 2127 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2128 letoh32(iip.msg_context)); 2129 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2130 letoh16(iip.ioc_status)); 2131 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2132 letoh32(iip.ioc_loginfo)); 2133 2134 return (0); 2135 } 2136 2137 int 2138 mpi_portfacts(struct mpi_softc *sc) 2139 { 2140 struct mpi_ccb *ccb; 2141 struct mpi_msg_portfacts_request *pfq; 2142 volatile struct mpi_msg_portfacts_reply *pfp; 2143 int rv = 1; 2144 2145 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc)); 2146 2147 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2148 if (ccb == NULL) { 2149 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n", 2150 DEVNAME(sc)); 2151 return (rv); 2152 } 2153 2154 ccb->ccb_done = mpi_empty_done; 2155 pfq = ccb->ccb_cmd; 2156 2157 pfq->function = MPI_FUNCTION_PORT_FACTS; 2158 pfq->chain_offset = 0; 2159 pfq->msg_flags = 0; 2160 pfq->port_number = 0; 2161 2162 if (mpi_poll(sc, ccb, 50000) != 0) { 2163 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc)); 2164 goto err; 2165 } 2166 2167 if (ccb->ccb_rcb == NULL) { 2168 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n", 2169 DEVNAME(sc)); 2170 goto err; 2171 } 2172 pfp = ccb->ccb_rcb->rcb_reply; 2173 2174 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n", 2175 DEVNAME(sc), pfp->function, pfp->msg_length); 2176 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n", 2177 DEVNAME(sc), pfp->msg_flags, pfp->port_number); 2178 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2179 letoh32(pfp->msg_context)); 2180 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2181 letoh16(pfp->ioc_status)); 2182 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2183 letoh32(pfp->ioc_loginfo)); 2184 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n", 2185 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type); 2186 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n", 2187 DEVNAME(sc), letoh16(pfp->protocol_flags), 2188 letoh16(pfp->port_scsi_id)); 2189 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d " 2190 "max_posted_cmd_buffers: %d\n", DEVNAME(sc), 2191 letoh16(pfp->max_persistent_ids), 2192 letoh16(pfp->max_posted_cmd_buffers)); 2193 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc), 2194 letoh16(pfp->max_lan_buckets)); 
2195 2196 sc->sc_porttype = pfp->port_type; 2197 if (sc->sc_target == -1) 2198 sc->sc_target = lemtoh16(&pfp->port_scsi_id); 2199 2200 mpi_push_reply(sc, ccb->ccb_rcb); 2201 rv = 0; 2202 err: 2203 scsi_io_put(&sc->sc_iopool, ccb); 2204 2205 return (rv); 2206 } 2207 2208 int 2209 mpi_cfg_coalescing(struct mpi_softc *sc) 2210 { 2211 struct mpi_cfg_hdr hdr; 2212 struct mpi_cfg_ioc_pg1 pg; 2213 u_int32_t flags; 2214 2215 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) { 2216 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n", 2217 DEVNAME(sc)); 2218 return (1); 2219 } 2220 2221 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) { 2222 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n", 2223 DEVNAME(sc)); 2224 return (1); 2225 } 2226 2227 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc)); 2228 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc), 2229 letoh32(pg.flags)); 2230 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc), 2231 letoh32(pg.coalescing_timeout)); 2232 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n", 2233 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num); 2234 2235 flags = lemtoh32(&pg.flags); 2236 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)) 2237 return (0); 2238 2239 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING)); 2240 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) { 2241 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n", 2242 DEVNAME(sc)); 2243 return (1); 2244 } 2245 2246 return (0); 2247 } 2248 2249 int 2250 mpi_eventnotify(struct mpi_softc *sc) 2251 { 2252 struct mpi_ccb *ccb; 2253 struct mpi_msg_event_request *enq; 2254 2255 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2256 if (ccb == NULL) { 2257 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n", 2258 DEVNAME(sc)); 2259 return (1); 2260 } 2261 2262 sc->sc_evt_ccb = ccb; 2263 SIMPLEQ_INIT(&sc->sc_evt_ack_queue); 2264 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO); 2265 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool, 2266 mpi_eventack, sc); 2267 2268 ccb->ccb_done = mpi_eventnotify_done; 2269 enq = ccb->ccb_cmd; 2270 2271 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION; 2272 enq->chain_offset = 0; 2273 enq->event_switch = MPI_EVENT_SWITCH_ON; 2274 2275 mpi_start(sc, ccb); 2276 return (0); 2277 } 2278 2279 void 2280 mpi_eventnotify_done(struct mpi_ccb *ccb) 2281 { 2282 struct mpi_softc *sc = ccb->ccb_sc; 2283 struct mpi_rcb *rcb = ccb->ccb_rcb; 2284 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2285 2286 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc)); 2287 2288 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d " 2289 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length, 2290 letoh16(enp->data_length)); 2291 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n", 2292 DEVNAME(sc), enp->ack_required, enp->msg_flags); 2293 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2294 letoh32(enp->msg_context)); 2295 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2296 letoh16(enp->ioc_status)); 2297 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2298 letoh32(enp->ioc_loginfo)); 2299 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc), 2300 letoh32(enp->event)); 2301 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc), 2302 letoh32(enp->event_context)); 2303 2304 switch (lemtoh32(&enp->event)) { 2305 /* ignore these */ 2306 case MPI_EVENT_EVENT_CHANGE: 2307 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2308 break; 2309 2310 
case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2311 if (sc->sc_scsibus == NULL) 2312 break; 2313 2314 if (mpi_evt_sas(sc, rcb) != 0) { 2315 /* reply is freed later on */ 2316 return; 2317 } 2318 break; 2319 2320 case MPI_EVENT_RESCAN: 2321 if (sc->sc_scsibus != NULL && 2322 sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC) 2323 task_add(systq, &sc->sc_evt_rescan); 2324 break; 2325 2326 default: 2327 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n", 2328 DEVNAME(sc), lemtoh32(&enp->event)); 2329 break; 2330 } 2331 2332 mpi_eventnotify_free(sc, rcb); 2333 } 2334 2335 void 2336 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb) 2337 { 2338 struct mpi_msg_event_reply *enp = rcb->rcb_reply; 2339 2340 if (enp->ack_required) { 2341 mtx_enter(&sc->sc_evt_ack_mtx); 2342 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link); 2343 mtx_leave(&sc->sc_evt_ack_mtx); 2344 scsi_ioh_add(&sc->sc_evt_ack_handler); 2345 } else 2346 mpi_push_reply(sc, rcb); 2347 } 2348 2349 int 2350 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb) 2351 { 2352 struct mpi_evt_sas_change *ch; 2353 u_int8_t *data; 2354 2355 data = rcb->rcb_reply; 2356 data += sizeof(struct mpi_msg_event_reply); 2357 ch = (struct mpi_evt_sas_change *)data; 2358 2359 if (ch->bus != 0) 2360 return (0); 2361 2362 switch (ch->reason) { 2363 case MPI_EVT_SASCH_REASON_ADDED: 2364 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED: 2365 KERNEL_LOCK(); 2366 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) { 2367 printf("%s: unable to request attach of %d\n", 2368 DEVNAME(sc), ch->target); 2369 } 2370 KERNEL_UNLOCK(); 2371 break; 2372 2373 case MPI_EVT_SASCH_REASON_NOT_RESPONDING: 2374 KERNEL_LOCK(); 2375 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE); 2376 KERNEL_UNLOCK(); 2377 2378 mtx_enter(&sc->sc_evt_scan_mtx); 2379 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link); 2380 mtx_leave(&sc->sc_evt_scan_mtx); 2381 scsi_ioh_add(&sc->sc_evt_scan_handler); 2382 2383 /* we'll handle event ack later on */ 2384 return (1); 2385 2386 case MPI_EVT_SASCH_REASON_SMART_DATA: 2387 case MPI_EVT_SASCH_REASON_UNSUPPORTED: 2388 case MPI_EVT_SASCH_REASON_INTERNAL_RESET: 2389 break; 2390 default: 2391 printf("%s: unknown reason for SAS device status change: " 2392 "0x%02x\n", DEVNAME(sc), ch->reason); 2393 break; 2394 } 2395 2396 return (0); 2397 } 2398 2399 void 2400 mpi_evt_sas_detach(void *cookie, void *io) 2401 { 2402 struct mpi_softc *sc = cookie; 2403 struct mpi_ccb *ccb = io; 2404 struct mpi_rcb *rcb, *next; 2405 struct mpi_msg_event_reply *enp; 2406 struct mpi_evt_sas_change *ch; 2407 struct mpi_msg_scsi_task_request *str; 2408 2409 DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc)); 2410 2411 mtx_enter(&sc->sc_evt_scan_mtx); 2412 rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue); 2413 if (rcb != NULL) { 2414 next = SIMPLEQ_NEXT(rcb, rcb_link); 2415 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link); 2416 } 2417 mtx_leave(&sc->sc_evt_scan_mtx); 2418 2419 if (rcb == NULL) { 2420 scsi_io_put(&sc->sc_iopool, ccb); 2421 return; 2422 } 2423 2424 enp = rcb->rcb_reply; 2425 ch = (struct mpi_evt_sas_change *)(enp + 1); 2426 2427 ccb->ccb_done = mpi_evt_sas_detach_done; 2428 str = ccb->ccb_cmd; 2429 2430 str->target_id = ch->target; 2431 str->bus = 0; 2432 str->function = MPI_FUNCTION_SCSI_TASK_MGMT; 2433 2434 str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET; 2435 2436 mpi_eventnotify_free(sc, rcb); 2437 2438 mpi_start(sc, ccb); 2439 2440 if (next != NULL) 2441 scsi_ioh_add(&sc->sc_evt_scan_handler); 2442 } 2443 2444 
void 2445 mpi_evt_sas_detach_done(struct mpi_ccb *ccb) 2446 { 2447 struct mpi_softc *sc = ccb->ccb_sc; 2448 struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply; 2449 2450 KERNEL_LOCK(); 2451 if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1, 2452 DETACH_FORCE) != 0) { 2453 printf("%s: unable to request detach of %d\n", 2454 DEVNAME(sc), r->target_id); 2455 } 2456 KERNEL_UNLOCK(); 2457 2458 mpi_push_reply(sc, ccb->ccb_rcb); 2459 scsi_io_put(&sc->sc_iopool, ccb); 2460 } 2461 2462 void 2463 mpi_fc_rescan(void *xsc) 2464 { 2465 struct mpi_softc *sc = xsc; 2466 struct mpi_cfg_hdr hdr; 2467 struct mpi_cfg_fc_device_pg0 pg; 2468 struct scsi_link *link; 2469 u_int8_t devmap[256 / NBBY]; 2470 u_int32_t id = 0xffffff; 2471 int i; 2472 2473 memset(devmap, 0, sizeof(devmap)); 2474 2475 do { 2476 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0, 2477 id, 0, &hdr) != 0) { 2478 printf("%s: header get for rescan of 0x%08x failed\n", 2479 DEVNAME(sc), id); 2480 return; 2481 } 2482 2483 memset(&pg, 0, sizeof(pg)); 2484 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0) 2485 break; 2486 2487 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) && 2488 pg.current_bus == 0) 2489 setbit(devmap, pg.current_target_id); 2490 2491 id = lemtoh32(&pg.port_id); 2492 } while (id <= 0xff0000); 2493 2494 for (i = 0; i < sc->sc_buswidth; i++) { 2495 link = scsi_get_link(sc->sc_scsibus, i, 0); 2496 2497 if (isset(devmap, i)) { 2498 if (link == NULL) 2499 scsi_probe_target(sc->sc_scsibus, i); 2500 } else { 2501 if (link != NULL) { 2502 scsi_activate(sc->sc_scsibus, i, -1, 2503 DVACT_DEACTIVATE); 2504 scsi_detach_target(sc->sc_scsibus, i, 2505 DETACH_FORCE); 2506 } 2507 } 2508 } 2509 } 2510 2511 void 2512 mpi_eventack(void *cookie, void *io) 2513 { 2514 struct mpi_softc *sc = cookie; 2515 struct mpi_ccb *ccb = io; 2516 struct mpi_rcb *rcb, *next; 2517 struct mpi_msg_event_reply *enp; 2518 struct mpi_msg_eventack_request *eaq; 2519 2520 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc)); 2521 2522 mtx_enter(&sc->sc_evt_ack_mtx); 2523 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 2524 if (rcb != NULL) { 2525 next = SIMPLEQ_NEXT(rcb, rcb_link); 2526 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 2527 } 2528 mtx_leave(&sc->sc_evt_ack_mtx); 2529 2530 if (rcb == NULL) { 2531 scsi_io_put(&sc->sc_iopool, ccb); 2532 return; 2533 } 2534 2535 enp = rcb->rcb_reply; 2536 2537 ccb->ccb_done = mpi_eventack_done; 2538 eaq = ccb->ccb_cmd; 2539 2540 eaq->function = MPI_FUNCTION_EVENT_ACK; 2541 2542 eaq->event = enp->event; 2543 eaq->event_context = enp->event_context; 2544 2545 mpi_push_reply(sc, rcb); 2546 mpi_start(sc, ccb); 2547 2548 if (next != NULL) 2549 scsi_ioh_add(&sc->sc_evt_ack_handler); 2550 } 2551 2552 void 2553 mpi_eventack_done(struct mpi_ccb *ccb) 2554 { 2555 struct mpi_softc *sc = ccb->ccb_sc; 2556 2557 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 2558 2559 mpi_push_reply(sc, ccb->ccb_rcb); 2560 scsi_io_put(&sc->sc_iopool, ccb); 2561 } 2562 2563 int 2564 mpi_portenable(struct mpi_softc *sc) 2565 { 2566 struct mpi_ccb *ccb; 2567 struct mpi_msg_portenable_request *peq; 2568 int rv = 0; 2569 2570 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2571 2572 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2573 if (ccb == NULL) { 2574 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2575 DEVNAME(sc)); 2576 return (1); 2577 } 2578 2579 ccb->ccb_done = mpi_empty_done; 2580 peq = ccb->ccb_cmd; 2581 2582 peq->function = MPI_FUNCTION_PORT_ENABLE; 2583 
peq->port_number = 0; 2584 2585 if (mpi_poll(sc, ccb, 50000) != 0) { 2586 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2587 return (1); 2588 } 2589 2590 if (ccb->ccb_rcb == NULL) { 2591 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2592 DEVNAME(sc)); 2593 rv = 1; 2594 } else 2595 mpi_push_reply(sc, ccb->ccb_rcb); 2596 2597 scsi_io_put(&sc->sc_iopool, ccb); 2598 2599 return (rv); 2600 } 2601 2602 int 2603 mpi_fwupload(struct mpi_softc *sc) 2604 { 2605 struct mpi_ccb *ccb; 2606 struct { 2607 struct mpi_msg_fwupload_request req; 2608 struct mpi_sge sge; 2609 } __packed *bundle; 2610 struct mpi_msg_fwupload_reply *upp; 2611 int rv = 0; 2612 2613 if (sc->sc_fw_len == 0) 2614 return (0); 2615 2616 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2617 2618 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2619 if (sc->sc_fw == NULL) { 2620 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n", 2621 DEVNAME(sc), sc->sc_fw_len); 2622 return (1); 2623 } 2624 2625 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 2626 if (ccb == NULL) { 2627 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n", 2628 DEVNAME(sc)); 2629 goto err; 2630 } 2631 2632 ccb->ccb_done = mpi_empty_done; 2633 bundle = ccb->ccb_cmd; 2634 2635 bundle->req.function = MPI_FUNCTION_FW_UPLOAD; 2636 2637 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW; 2638 2639 bundle->req.tce.details_length = 12; 2640 htolem32(&bundle->req.tce.image_size, sc->sc_fw_len); 2641 2642 htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2643 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 2644 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len); 2645 mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw)); 2646 2647 if (mpi_poll(sc, ccb, 50000) != 0) { 2648 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc)); 2649 goto err; 2650 } 2651 2652 if (ccb->ccb_rcb == NULL) 2653 panic("%s: unable to do fw upload", DEVNAME(sc)); 2654 upp = ccb->ccb_rcb->rcb_reply; 2655 2656 if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2657 rv = 1; 2658 2659 mpi_push_reply(sc, ccb->ccb_rcb); 2660 scsi_io_put(&sc->sc_iopool, ccb); 2661 2662 return (rv); 2663 2664 err: 2665 mpi_dmamem_free(sc, sc->sc_fw); 2666 return (1); 2667 } 2668 2669 int 2670 mpi_manufacturing(struct mpi_softc *sc) 2671 { 2672 char board_name[33]; 2673 struct mpi_cfg_hdr hdr; 2674 struct mpi_cfg_manufacturing_pg0 *pg; 2675 size_t pagelen; 2676 int rv = 1; 2677 2678 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING, 2679 0, 0, &hdr) != 0) 2680 return (1); 2681 2682 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2683 if (pagelen < sizeof(*pg)) 2684 return (1); 2685 2686 pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2687 if (pg == NULL) 2688 return (1); 2689 2690 if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0) 2691 goto out; 2692 2693 scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name)); 2694 2695 printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name, 2696 sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev); 2697 2698 rv = 0; 2699 2700 out: 2701 free(pg, M_TEMP, pagelen); 2702 return (rv); 2703 } 2704 2705 void 2706 mpi_get_raid(struct mpi_softc *sc) 2707 { 2708 struct mpi_cfg_hdr hdr; 2709 struct mpi_cfg_ioc_pg2 *vol_page; 2710 size_t pagelen; 2711 u_int32_t capabilities; 2712 2713 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc)); 2714 2715 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) { 2716 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header" 2717
"for IOC page 2\n", DEVNAME(sc)); 2718 return; 2719 } 2720 2721 pagelen = hdr.page_length * 4; /* dwords to bytes */ 2722 vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL); 2723 if (vol_page == NULL) { 2724 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate " 2725 "space for ioc config page 2\n", DEVNAME(sc)); 2726 return; 2727 } 2728 2729 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) { 2730 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC " 2731 "page 2\n", DEVNAME(sc)); 2732 goto out; 2733 } 2734 2735 capabilities = lemtoh32(&vol_page->capabilities); 2736 2737 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x%08x\n", DEVNAME(sc), 2738 letoh32(vol_page->capabilities)); 2739 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d " 2740 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 2741 vol_page->active_vols, vol_page->max_vols, 2742 vol_page->active_physdisks, vol_page->max_physdisks); 2743 2744 /* don't walk the volume list if there is no RAID capability */ 2745 if (capabilities == 0xdeadbeef) { 2746 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)); 2747 goto out; 2748 } 2749 2750 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)) 2751 sc->sc_flags |= MPI_F_RAID; 2752 2753 out: 2754 free(vol_page, M_TEMP, pagelen); 2755 } 2756 2757 int 2758 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number, 2759 u_int32_t address, int flags, void *p) 2760 { 2761 struct mpi_ccb *ccb; 2762 struct mpi_msg_config_request *cq; 2763 struct mpi_msg_config_reply *cp; 2764 struct mpi_cfg_hdr *hdr = p; 2765 struct mpi_ecfg_hdr *ehdr = p; 2766 int etype = 0; 2767 int rv = 0; 2768 2769 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x " 2770 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number, 2771 address, flags, MPI_PG_FMT); 2772 2773 ccb = scsi_io_get(&sc->sc_iopool, 2774 ISSET(flags, MPI_PG_POLL) ?
SCSI_NOSLEEP : 0); 2775 if (ccb == NULL) { 2776 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n", 2777 DEVNAME(sc)); 2778 return (1); 2779 } 2780 2781 if (ISSET(flags, MPI_PG_EXTENDED)) { 2782 etype = type; 2783 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED; 2784 } 2785 2786 cq = ccb->ccb_cmd; 2787 2788 cq->function = MPI_FUNCTION_CONFIG; 2789 2790 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER; 2791 2792 cq->config_header.page_number = number; 2793 cq->config_header.page_type = type; 2794 cq->ext_page_type = etype; 2795 htolem32(&cq->page_address, address); 2796 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2797 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 2798 2799 ccb->ccb_done = mpi_empty_done; 2800 if (ISSET(flags, MPI_PG_POLL)) { 2801 if (mpi_poll(sc, ccb, 50000) != 0) { 2802 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", 2803 DEVNAME(sc)); 2804 return (1); 2805 } 2806 } else 2807 mpi_wait(sc, ccb); 2808 2809 if (ccb->ccb_rcb == NULL) 2810 panic("%s: unable to fetch config header", DEVNAME(sc)); 2811 cp = ccb->ccb_rcb->rcb_reply; 2812 2813 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2814 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2815 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2816 "msg_flags: 0x%02x\n", DEVNAME(sc), 2817 letoh16(cp->ext_page_length), cp->ext_page_type, 2818 cp->msg_flags); 2819 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2820 letoh32(cp->msg_context)); 2821 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2822 letoh16(cp->ioc_status)); 2823 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2824 letoh32(cp->ioc_loginfo)); 2825 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2826 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2827 cp->config_header.page_version, 2828 cp->config_header.page_length, 2829 cp->config_header.page_number, 2830 cp->config_header.page_type); 2831 2832 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2833 rv = 1; 2834 else if (ISSET(flags, MPI_PG_EXTENDED)) { 2835 memset(ehdr, 0, sizeof(*ehdr)); 2836 ehdr->page_version = cp->config_header.page_version; 2837 ehdr->page_number = cp->config_header.page_number; 2838 ehdr->page_type = cp->config_header.page_type; 2839 ehdr->ext_page_length = cp->ext_page_length; 2840 ehdr->ext_page_type = cp->ext_page_type; 2841 } else 2842 *hdr = cp->config_header; 2843 2844 mpi_push_reply(sc, ccb->ccb_rcb); 2845 scsi_io_put(&sc->sc_iopool, ccb); 2846 2847 return (rv); 2848 } 2849 2850 int 2851 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags, 2852 void *p, int read, void *page, size_t len) 2853 { 2854 struct mpi_ccb *ccb; 2855 struct mpi_msg_config_request *cq; 2856 struct mpi_msg_config_reply *cp; 2857 struct mpi_cfg_hdr *hdr = p; 2858 struct mpi_ecfg_hdr *ehdr = p; 2859 char *kva; 2860 int page_length; 2861 int rv = 0; 2862 2863 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n", 2864 DEVNAME(sc), address, read, hdr->page_type); 2865 2866 page_length = ISSET(flags, MPI_PG_EXTENDED) ? 2867 lemtoh16(&ehdr->ext_page_length) : hdr->page_length; 2868 2869 if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) || 2870 len < page_length * 4) 2871 return (1); 2872 2873 ccb = scsi_io_get(&sc->sc_iopool, 2874 ISSET(flags, MPI_PG_POLL) ? 
SCSI_NOSLEEP : 0); 2875 if (ccb == NULL) { 2876 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2877 return (1); 2878 } 2879 2880 cq = ccb->ccb_cmd; 2881 2882 cq->function = MPI_FUNCTION_CONFIG; 2883 2884 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2885 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2886 2887 if (ISSET(flags, MPI_PG_EXTENDED)) { 2888 cq->config_header.page_version = ehdr->page_version; 2889 cq->config_header.page_number = ehdr->page_number; 2890 cq->config_header.page_type = ehdr->page_type; 2891 cq->ext_page_len = ehdr->ext_page_length; 2892 cq->ext_page_type = ehdr->ext_page_type; 2893 } else 2894 cq->config_header = *hdr; 2895 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2896 htolem32(&cq->page_address, address); 2897 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | 2898 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2899 (page_length * 4) | 2900 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2901 2902 /* bounce the page via the request space to avoid more bus_dma games */ 2903 mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva + 2904 sizeof(struct mpi_msg_config_request)); 2905 2906 kva = ccb->ccb_cmd; 2907 kva += sizeof(struct mpi_msg_config_request); 2908 if (!read) 2909 memcpy(kva, page, len); 2910 2911 ccb->ccb_done = mpi_empty_done; 2912 if (ISSET(flags, MPI_PG_POLL)) { 2913 if (mpi_poll(sc, ccb, 50000) != 0) { 2914 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n", 2915 DEVNAME(sc)); 2916 return (1); 2917 } 2918 } else 2919 mpi_wait(sc, ccb); 2920 2921 if (ccb->ccb_rcb == NULL) { 2922 scsi_io_put(&sc->sc_iopool, ccb); 2923 return (1); 2924 } 2925 cp = ccb->ccb_rcb->rcb_reply; 2926 2927 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2928 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2929 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2930 "msg_flags: 0x%02x\n", DEVNAME(sc), 2931 letoh16(cp->ext_page_length), cp->ext_page_type, 2932 cp->msg_flags); 2933 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2934 letoh32(cp->msg_context)); 2935 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2936 letoh16(cp->ioc_status)); 2937 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2938 letoh32(cp->ioc_loginfo)); 2939 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2940 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2941 cp->config_header.page_version, 2942 cp->config_header.page_length, 2943 cp->config_header.page_number, 2944 cp->config_header.page_type); 2945 2946 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2947 rv = 1; 2948 else if (read) 2949 memcpy(page, kva, len); 2950 2951 mpi_push_reply(sc, ccb->ccb_rcb); 2952 scsi_io_put(&sc->sc_iopool, ccb); 2953 2954 return (rv); 2955 } 2956 2957 int 2958 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) 2959 { 2960 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2961 2962 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc)); 2963 2964 switch (cmd) { 2965 case DIOCGCACHE: 2966 case DIOCSCACHE: 2967 if (ISSET(link->flags, SDEV_VIRTUAL)) { 2968 return (mpi_ioctl_cache(link, cmd, 2969 (struct dk_cache *)addr)); 2970 } 2971 break; 2972 2973 default: 2974 if (sc->sc_ioctl) 2975 return (sc->sc_ioctl(link->adapter_softc, cmd, addr)); 2976 2977 break; 2978 } 2979 2980 return (ENOTTY); 2981 } 2982 2983 int 2984 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc) 2985 {
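	/*
	 * DIOCGCACHE/DIOCSCACHE on a virtual (RAID) disk: read RAID volume
	 * page 0 for this target to report the write cache setting, and for
	 * DIOCSCACHE push the modified volume settings back to the IOC with
	 * a RAID action request.
	 */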
2986 struct mpi_softc *sc = (struct mpi_softc *)link->adapter_softc; 2987 struct mpi_ccb *ccb; 2988 int len, rv; 2989 struct mpi_cfg_hdr hdr; 2990 struct mpi_cfg_raid_vol_pg0 *rpg0; 2991 int enabled; 2992 struct mpi_msg_raid_action_request *req; 2993 struct mpi_msg_raid_action_reply *rep; 2994 struct mpi_raid_settings settings; 2995 2996 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 2997 link->target, MPI_PG_POLL, &hdr); 2998 if (rv != 0) 2999 return (EIO); 3000 3001 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks * 3002 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3003 rpg0 = malloc(len, M_TEMP, M_NOWAIT); 3004 if (rpg0 == NULL) 3005 return (ENOMEM); 3006 3007 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, 3008 rpg0, len) != 0) { 3009 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3010 DEVNAME(sc)); 3011 rv = EIO; 3012 goto done; 3013 } 3014 3015 enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings), 3016 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0; 3017 3018 if (cmd == DIOCGCACHE) { 3019 dc->wrcache = enabled; 3020 dc->rdcache = 0; 3021 goto done; 3022 } /* else DIOCSCACHE */ 3023 3024 if (dc->rdcache) { 3025 rv = EOPNOTSUPP; 3026 goto done; 3027 } 3028 3029 if (((dc->wrcache) ? 1 : 0) == enabled) 3030 goto done; 3031 3032 settings = rpg0->settings; 3033 if (dc->wrcache) { 3034 SET(settings.volume_settings, 3035 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3036 } else { 3037 CLR(settings.volume_settings, 3038 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)); 3039 } 3040 3041 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 3042 if (ccb == NULL) { 3043 rv = ENOMEM; 3044 goto done; 3045 } 3046 3047 req = ccb->ccb_cmd; 3048 req->function = MPI_FUNCTION_RAID_ACTION; 3049 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS; 3050 req->vol_id = rpg0->volume_id; 3051 req->vol_bus = rpg0->volume_bus; 3052 3053 memcpy(&req->data_word, &settings, sizeof(req->data_word)); 3054 ccb->ccb_done = mpi_empty_done; 3055 if (mpi_poll(sc, ccb, 50000) != 0) { 3056 rv = EIO; 3057 goto done; 3058 } 3059 3060 if (ccb->ccb_rcb == NULL) 3061 panic("%s: raid volume settings change failed", DEVNAME(sc)); 3062 rep = ccb->ccb_rcb->rcb_reply; 3063 3064 switch (lemtoh16(&rep->action_status)) { 3065 case MPI_RAID_ACTION_STATUS_OK: 3066 rv = 0; 3067 break; 3068 default: 3069 rv = EIO; 3070 break; 3071 } 3072 3073 mpi_push_reply(sc, ccb->ccb_rcb); 3074 scsi_io_put(&sc->sc_iopool, ccb); 3075 3076 done: 3077 free(rpg0, M_TEMP, len); 3078 return (rv); 3079 } 3080 3081 #if NBIO > 0 3082 int 3083 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id) 3084 { 3085 int len, rv = EINVAL; 3086 u_int32_t address; 3087 struct mpi_cfg_hdr hdr; 3088 struct mpi_cfg_raid_vol_pg0 *rpg0; 3089 3090 /* get IOC page 2 */ 3091 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3092 sc->sc_cfg_hdr.page_length * 4) != 0) { 3093 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to " 3094 "fetch IOC page 2\n", DEVNAME(sc)); 3095 goto done; 3096 } 3097 3098 /* XXX return something other than EINVAL to indicate id is within the hot spare range */ 3099 if (id > sc->sc_vol_page->active_vols) { 3100 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol " 3101 "id: %d\n", DEVNAME(sc), id); 3102 goto done; 3103 } 3104 3105 /* replace current buffer with new one */ 3106 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks * 3107 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk); 3108 rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL); 3109 if (rpg0 == NULL) { 3110
printf("%s: can't get memory for RAID page 0, " 3111 "bio disabled\n", DEVNAME(sc)); 3112 goto done; 3113 } 3114 if (sc->sc_rpg0) 3115 free(sc->sc_rpg0, M_DEVBUF, 0); 3116 sc->sc_rpg0 = rpg0; 3117 3118 /* get raid vol page 0 */ 3119 address = sc->sc_vol_list[id].vol_id | 3120 (sc->sc_vol_list[id].vol_bus << 8); 3121 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0, 3122 address, 0, &hdr) != 0) 3123 goto done; 3124 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) { 3125 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n", 3126 DEVNAME(sc)); 3127 goto done; 3128 } 3129 3130 rv = 0; 3131 done: 3132 return (rv); 3133 } 3134 3135 int 3136 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3137 { 3138 struct mpi_softc *sc = (struct mpi_softc *)dev; 3139 int error = 0; 3140 3141 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc)); 3142 3143 /* make sure we have bio enabled */ 3144 if (sc->sc_ioctl != mpi_ioctl) 3145 return (EINVAL); 3146 3147 rw_enter_write(&sc->sc_lock); 3148 3149 switch (cmd) { 3150 case BIOCINQ: 3151 DNPRINTF(MPI_D_IOCTL, "inq\n"); 3152 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr); 3153 break; 3154 3155 case BIOCVOL: 3156 DNPRINTF(MPI_D_IOCTL, "vol\n"); 3157 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr); 3158 break; 3159 3160 case BIOCDISK: 3161 DNPRINTF(MPI_D_IOCTL, "disk\n"); 3162 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr); 3163 break; 3164 3165 case BIOCALARM: 3166 DNPRINTF(MPI_D_IOCTL, "alarm\n"); 3167 break; 3168 3169 case BIOCBLINK: 3170 DNPRINTF(MPI_D_IOCTL, "blink\n"); 3171 break; 3172 3173 case BIOCSETSTATE: 3174 DNPRINTF(MPI_D_IOCTL, "setstate\n"); 3175 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr); 3176 break; 3177 3178 default: 3179 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n"); 3180 error = ENOTTY; 3181 } 3182 3183 rw_exit_write(&sc->sc_lock); 3184 3185 return (error); 3186 } 3187 3188 int 3189 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi) 3190 { 3191 if (!(sc->sc_flags & MPI_F_RAID)) { 3192 bi->bi_novol = 0; 3193 bi->bi_nodisk = 0; 3194 } 3195 3196 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page, 3197 sc->sc_cfg_hdr.page_length * 4) != 0) { 3198 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC " 3199 "page 2\n", DEVNAME(sc)); 3200 return (EINVAL); 3201 } 3202 3203 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d " 3204 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc), 3205 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols, 3206 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks); 3207 3208 bi->bi_novol = sc->sc_vol_page->active_vols; 3209 bi->bi_nodisk = sc->sc_vol_page->active_physdisks; 3210 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3211 3212 return (0); 3213 } 3214 3215 int 3216 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv) 3217 { 3218 int i, vol, id, rv = EINVAL; 3219 struct device *dev; 3220 struct scsi_link *link; 3221 struct mpi_cfg_raid_vol_pg0 *rpg0; 3222 char *vendp; 3223 3224 id = bv->bv_volid; 3225 if (mpi_bio_get_pg0_raid(sc, id)) 3226 goto done; 3227 3228 if (id > sc->sc_vol_page->active_vols) 3229 return (EINVAL); /* XXX deal with hot spares */ 3230 3231 rpg0 = sc->sc_rpg0; 3232 if (rpg0 == NULL) 3233 goto done; 3234 3235 /* determine status */ 3236 switch (rpg0->volume_state) { 3237 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3238 bv->bv_status = BIOC_SVONLINE; 3239 break; 3240 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3241 bv->bv_status = BIOC_SVDEGRADED; 3242 break; 3243 case 
MPI_CFG_RAID_VOL_0_STATE_FAILED: 3244 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3245 bv->bv_status = BIOC_SVOFFLINE; 3246 break; 3247 default: 3248 bv->bv_status = BIOC_SVINVALID; 3249 } 3250 3251 /* override status if scrubbing or something */ 3252 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) 3253 bv->bv_status = BIOC_SVREBUILD; 3254 3255 bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512; 3256 3257 switch (sc->sc_vol_list[id].vol_type) { 3258 case MPI_CFG_RAID_TYPE_RAID_IS: 3259 bv->bv_level = 0; 3260 break; 3261 case MPI_CFG_RAID_TYPE_RAID_IME: 3262 case MPI_CFG_RAID_TYPE_RAID_IM: 3263 bv->bv_level = 1; 3264 break; 3265 case MPI_CFG_RAID_TYPE_RAID_5: 3266 bv->bv_level = 5; 3267 break; 3268 case MPI_CFG_RAID_TYPE_RAID_6: 3269 bv->bv_level = 6; 3270 break; 3271 case MPI_CFG_RAID_TYPE_RAID_10: 3272 bv->bv_level = 10; 3273 break; 3274 case MPI_CFG_RAID_TYPE_RAID_50: 3275 bv->bv_level = 50; 3276 break; 3277 default: 3278 bv->bv_level = -1; 3279 } 3280 3281 bv->bv_nodisk = rpg0->num_phys_disks; 3282 3283 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) { 3284 link = scsi_get_link(sc->sc_scsibus, i, 0); 3285 if (link == NULL) 3286 continue; 3287 3288 /* skip if not a virtual disk */ 3289 if (!(link->flags & SDEV_VIRTUAL)) 3290 continue; 3291 3292 vol++; 3293 /* are we it? */ 3294 if (vol == bv->bv_volid) { 3295 dev = link->device_softc; 3296 vendp = link->inqdata.vendor; 3297 memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor); 3298 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0'; 3299 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev); 3300 break; 3301 } 3302 } 3303 rv = 0; 3304 done: 3305 return (rv); 3306 } 3307 3308 int 3309 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd) 3310 { 3311 int pdid, id, rv = EINVAL; 3312 u_int32_t address; 3313 struct mpi_cfg_hdr hdr; 3314 struct mpi_cfg_raid_vol_pg0 *rpg0; 3315 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk; 3316 struct mpi_cfg_raid_physdisk_pg0 pdpg0; 3317 3318 id = bd->bd_volid; 3319 if (mpi_bio_get_pg0_raid(sc, id)) 3320 goto done; 3321 3322 if (id > sc->sc_vol_page->active_vols) 3323 return (EINVAL); /* XXX deal with hot spares */ 3324 3325 rpg0 = sc->sc_rpg0; 3326 if (rpg0 == NULL) 3327 goto done; 3328 3329 pdid = bd->bd_diskid; 3330 if (pdid > rpg0->num_phys_disks) 3331 goto done; 3332 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1); 3333 physdisk += pdid; 3334 3335 /* get raid phys disk page 0 */ 3336 address = physdisk->phys_disk_num; 3337 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address, 3338 &hdr) != 0) 3339 goto done; 3340 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) { 3341 bd->bd_status = BIOC_SDFAILED; 3342 return (0); 3343 } 3344 bd->bd_channel = pdpg0.phys_disk_bus; 3345 bd->bd_target = pdpg0.phys_disk_id; 3346 bd->bd_lun = 0; 3347 bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512; 3348 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor)); 3349 3350 switch (pdpg0.phys_disk_state) { 3351 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE: 3352 bd->bd_status = BIOC_SDONLINE; 3353 break; 3354 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING: 3355 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED: 3356 bd->bd_status = BIOC_SDFAILED; 3357 break; 3358 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL: 3359 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER: 3360 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE: 3361 bd->bd_status = BIOC_SDOFFLINE; 3362 break; 3363 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT: 3364 bd->bd_status = BIOC_SDSCRUB; 3365 break; 3366 case 
MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT: 3367 default: 3368 bd->bd_status = BIOC_SDINVALID; 3369 break; 3370 } 3371 3372 /* XXX figure this out */ 3373 /* bd_serial[32]; */ 3374 /* bd_procdev[16]; */ 3375 3376 rv = 0; 3377 done: 3378 return (rv); 3379 } 3380 3381 int 3382 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs) 3383 { 3384 return (ENOTTY); 3385 } 3386 3387 #ifndef SMALL_KERNEL 3388 int 3389 mpi_create_sensors(struct mpi_softc *sc) 3390 { 3391 struct device *dev; 3392 struct scsi_link *link; 3393 int i, vol, nsensors; 3394 3395 /* count volumes */ 3396 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3397 link = scsi_get_link(sc->sc_scsibus, i, 0); 3398 if (link == NULL) 3399 continue; 3400 /* skip if not a virtual disk */ 3401 if (!(link->flags & SDEV_VIRTUAL)) 3402 continue; 3403 3404 vol++; 3405 } 3406 if (vol == 0) 3407 return (0); 3408 3409 sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor), 3410 M_DEVBUF, M_NOWAIT | M_ZERO); 3411 if (sc->sc_sensors == NULL) 3412 return (1); 3413 nsensors = vol; 3414 3415 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc), 3416 sizeof(sc->sc_sensordev.xname)); 3417 3418 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) { 3419 link = scsi_get_link(sc->sc_scsibus, i, 0); 3420 if (link == NULL) 3421 continue; 3422 /* skip if not a virtual disk */ 3423 if (!(link->flags & SDEV_VIRTUAL)) 3424 continue; 3425 3426 dev = link->device_softc; 3427 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname, 3428 sizeof(sc->sc_sensors[vol].desc)); 3429 sc->sc_sensors[vol].type = SENSOR_DRIVE; 3430 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3431 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]); 3432 3433 vol++; 3434 } 3435 3436 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL) 3437 goto bad; 3438 3439 sensordev_install(&sc->sc_sensordev); 3440 3441 return (0); 3442 3443 bad: 3444 free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor)); 3445 return (1); 3446 } 3447 3448 void 3449 mpi_refresh_sensors(void *arg) 3450 { 3451 int i, vol; 3452 struct scsi_link *link; 3453 struct mpi_softc *sc = arg; 3454 struct mpi_cfg_raid_vol_pg0 *rpg0; 3455 3456 rw_enter_write(&sc->sc_lock); 3457 3458 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) { 3459 link = scsi_get_link(sc->sc_scsibus, i, 0); 3460 if (link == NULL) 3461 continue; 3462 /* skip if not a virtual disk */ 3463 if (!(link->flags & SDEV_VIRTUAL)) 3464 continue; 3465 3466 if (mpi_bio_get_pg0_raid(sc, vol)) 3467 continue; 3468 3469 rpg0 = sc->sc_rpg0; 3470 if (rpg0 == NULL) 3471 goto done; 3472 3473 /* determine status */ 3474 switch (rpg0->volume_state) { 3475 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL: 3476 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE; 3477 sc->sc_sensors[vol].status = SENSOR_S_OK; 3478 break; 3479 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED: 3480 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL; 3481 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3482 break; 3483 case MPI_CFG_RAID_VOL_0_STATE_FAILED: 3484 case MPI_CFG_RAID_VOL_0_STATE_MISSING: 3485 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL; 3486 sc->sc_sensors[vol].status = SENSOR_S_CRIT; 3487 break; 3488 default: 3489 sc->sc_sensors[vol].value = 0; /* unknown */ 3490 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN; 3491 } 3492 3493 /* override status if scrubbing or something */ 3494 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) { 3495 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD; 3496 sc->sc_sensors[vol].status = SENSOR_S_WARN; 3497 } 3498 3499 vol++; 3500 } 3501 done: 3502 
rw_exit_write(&sc->sc_lock); 3503 } 3504 #endif /* SMALL_KERNEL */ 3505 #endif /* NBIO > 0 */ 3506