/*	$NetBSD: mfi.c,v 1.34 2010/03/14 18:06:28 pgoyette Exp $ */
/*	$OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
/*
 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.34 2010/03/14 18:06:28 pgoyette Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_param.h>

#include <sys/bus.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/mfireg.h>
#include <dev/ic/mfivar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef MFI_DEBUG
uint32_t mfi_debug = 0
/*	    | MFI_D_CMD */
/*	    | MFI_D_INTR */
/*	    | MFI_D_MISC */
/*	    | MFI_D_DMA */
	    | MFI_D_IOCTL
/*	    | MFI_D_RW */
/*	    | MFI_D_MEM */
/*	    | MFI_D_CCB */
	;
#endif

static void	mfi_scsipi_request(struct scsipi_channel *,
			scsipi_adapter_req_t, void *);
static void	mfiminphys(struct buf *bp);

static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
static void		mfi_put_ccb(struct mfi_ccb *);
static int		mfi_init_ccb(struct mfi_softc *);

static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
static void		mfi_freemem(struct mfi_softc *, struct mfi_mem **);

static int		mfi_transition_firmware(struct mfi_softc *);
static int		mfi_initialize_firmware(struct mfi_softc *);
static int		mfi_get_info(struct mfi_softc *);
static uint32_t		mfi_read(struct mfi_softc *, bus_size_t);
static void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
static int		mfi_poll(struct mfi_ccb *);
static int		mfi_create_sgl(struct mfi_ccb *, int);

/* commands */
static int		mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
static int		mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
				uint32_t, uint32_t);
static void		mfi_scsi_xs_done(struct mfi_ccb *);
static int		mfi_mgmt_internal(struct mfi_softc *,
				uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static int		mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
				uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static void		mfi_mgmt_done(struct mfi_ccb *);

#if NBIO > 0
static int	mfi_ioctl(device_t, u_long, void *);
static int	mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
static int	mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
static int	mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
static int	mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
static int	mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
static int	mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
static int	mfi_bio_hs(struct mfi_softc *, int, int, void *);
static int	mfi_create_sensors(struct mfi_softc *);
static int	mfi_destroy_sensors(struct mfi_softc *);
static void	mfi_sensor_refresh(struct sysmon_envsys *, envsys_data_t *);
#endif /* NBIO > 0 */

static uint32_t	mfi_xscale_fw_state(struct mfi_softc *sc);
static void	mfi_xscale_intr_ena(struct mfi_softc *sc);
static void	mfi_xscale_intr_dis(struct mfi_softc *sc);
static int	mfi_xscale_intr(struct mfi_softc *sc);
static void	mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_dis,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post
};

static uint32_t	mfi_ppc_fw_state(struct mfi_softc *sc);
static void	mfi_ppc_intr_ena(struct mfi_softc *sc);
static void	mfi_ppc_intr_dis(struct mfi_softc *sc);
static int	mfi_ppc_intr(struct mfi_softc *sc);
static void	mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_dis,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post
};

uint32_t	mfi_gen2_fw_state(struct mfi_softc *sc);
void		mfi_gen2_intr_ena(struct mfi_softc *sc);
void		mfi_gen2_intr_dis(struct mfi_softc *sc);
int		mfi_gen2_intr(struct mfi_softc *sc);
void		mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_dis,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post
};

#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_intr_disable(_s)	((_s)->sc_iop->mio_intr_dis(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))

static struct mfi_ccb *
mfi_get_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	int		s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = MFI_CCB_READY;
	}
	splx(s);

	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);

	return ccb;
}

static void
mfi_put_ccb(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	int			s;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	s = splbio();
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
	splx(s);
}
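
/*
 * Tear down the per-command resources built by mfi_init_ccb(): every CCB
 * must be sitting on the free queue, otherwise EBUSY is returned and the
 * CCB array is left intact.
 */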
static int
mfi_destroy_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	uint32_t	i;

	DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc));

	for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) {
		/* destroy the dma map used for transfers */
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (i < sc->sc_max_cmds)
		return EBUSY;

	free(sc->sc_ccb, M_DEVBUF);

	return 0;
}

static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	uint32_t	i;
	int		error;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char *)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char *)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}

static uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
	return rv;
}

static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem	*mm;
	int		nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}

static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp)
{
	struct mfi_mem *mm = *mmp;

	if (mm == NULL)
		return;

	*mmp = NULL;

	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}

static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t	fw_state, cur_state;
	int		max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
	}

	return 0;
}

static int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return 1;

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof *qinfo);
	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));
	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));
	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = sizeof *qinfo;
	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);

	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
	    DEVNAME(sc),
	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);

	if (mfi_poll(ccb)) {
		printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
		return 1;
	}

	mfi_put_ccb(ccb);

	return 0;
}
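
/*
 * Fetch the controller information structure via MR_DCMD_CTRL_GET_INFO.
 * With MFI_DEBUG enabled most of the returned fields are printed.
 */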
static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL))
		return 1;

#ifdef MFI_DEBUG
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}

static void
mfiminphys(struct buf *bp)
{
	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);

	/* XXX currently using MFI_MAXFER = MAXPHYS */
	if (bp->b_bcount > MFI_MAXFER)
		bp->b_bcount = MFI_MAXFER;
	minphys(bp);
}

int
mfi_rescan(device_t self, const char *ifattr, const int *locators)
{
	struct mfi_softc *sc = device_private(self);

	if (sc->sc_child != NULL)
		return 0;

	sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan,
	    scsiprint, NULL);

	return 0;
}

void
mfi_childdetached(device_t self, device_t child)
{
	struct mfi_softc *sc = device_private(self);

	KASSERT(self == sc->sc_dev);
	KASSERT(child == sc->sc_child);

	if (child == sc->sc_child)
		sc->sc_child = NULL;
}

int
mfi_detach(struct mfi_softc *sc, int flags)
{
	int error;

	DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc));

	if ((error = config_detach_children(sc->sc_dev, flags)) != 0)
		return error;

#if NBIO > 0
	mfi_destroy_sensors(sc);
	bio_unregister(sc->sc_dev);
#endif /* NBIO > 0 */

	mfi_intr_disable(sc);

	/* TBD: shutdown firmware */

	if ((error = mfi_destroy_ccb(sc)) != 0)
		return error;

	mfi_freemem(sc, &sc->sc_sense);

	mfi_freemem(sc, &sc->sc_frames);

	mfi_freemem(sc, &sc->sc_pcq);

	return 0;
}
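
/*
 * Common attach path: bring the firmware to the READY state, allocate the
 * reply queue, frame and sense DMA memory, build the CCB pool, push the
 * queue addresses to the firmware and finally register the scsipi channel
 * (plus, with bio(4), the ioctl and envsys hooks).
 */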
int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	uint32_t status, frames;
	int i;

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	case MFI_IOP_GEN2:
		sc->sc_iop = &mfi_iop_gen2;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	if (mfi_transition_firmware(sc))
		return 1;

	TAILQ_INIT(&sc->sc_ccb_freeq);

	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);

	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		aprint_error("%s: unable to allocate reply queue memory\n",
		    DEVNAME(sc));
		goto nopcq;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* frame memory */
	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
	frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
	    MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		aprint_error("%s: unable to allocate frame memory\n",
		    DEVNAME(sc));
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
		    DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error("%s: unable to allocate sense memory\n",
		    DEVNAME(sc));
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (mfi_initialize_firmware(sc)) {
		aprint_error("%s: could not initialize firmware\n",
		    DEVNAME(sc));
		goto noinit;
	}

	if (mfi_get_info(sc)) {
		aprint_error("%s: could not retrieve controller information\n",
		    DEVNAME(sc));
		goto noinit;
	}

	aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_package_version,
	    sc->sc_info.mci_memory_size);

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	sc->sc_max_ld = sc->sc_ld_cnt;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = 1;
	if (sc->sc_ld_cnt)
		adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfi_scsipi_request;
	adapt->adapt_minphys = mfiminphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = MFI_MAX_LD;
	chan->chan_id = MFI_MAX_LD;

	mfi_rescan(sc->sc_dev, "scsi", NULL);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	if (mfi_create_sensors(sc) != 0)
		aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
#endif /* NBIO > 0 */

	return 0;
noinit:
	mfi_freemem(sc, &sc->sc_sense);
nosense:
	mfi_freemem(sc, &sc->sc_frames);
noframe:
	mfi_freemem(sc, &sc->sc_pcq);
nopcq:
	return 1;
}
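
/*
 * Post a frame and busy-wait for its completion status.  Used for polled
 * requests and before interrupts are enabled; gives up after roughly
 * five seconds.
 */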
static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_post(sc, ccb);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

	while (hdr->mfh_cmd_status == 0xff) {
		delay(1000);
		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
			break;
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status == 0xff) {
		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}

int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq;
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}
static int
mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len = blockcnt;
	io->mif_lba_hi = 0;
	io->mif_lba_lo = blockno;
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

static void
mfi_scsi_xs_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), hdr->mfh_cmd_status);

		if (hdr->mfh_scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), hdr->mfh_scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}
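
/*
 * Wrap a non-read/write CDB in a pass-through frame so the firmware can
 * execute it against the logical drive.
 */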
static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame	*pf;
	struct scsipi_periph	*periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph	*periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter	*adapt = chan->chan_adapter;
	struct mfi_softc	*sc = device_private(adapt->adapt_dev);
	struct mfi_ccb		*ccb;
	struct scsi_rw_6	*rw;
	struct scsipi_rw_10	*rwb;
	uint32_t		blockno, blockcnt;
	uint8_t			target;
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
	    DEVNAME(sc), req, xs->cmd->opcode);

	periph = xs->xs_periph;
	target = periph->periph_target;

	s = splbio();
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n",
		    DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	/* hand it off to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold)	/* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev,
			    device_xname(sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			printf("%s: mfi_scsipi_request poll failed\n",
			    DEVNAME(sc));
			memset(&xs->sense, 0, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}

static int
mfi_create_sgl(struct mfi_ccb *ccb, int flags)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	bus_dma_segment_t	*sgd;
	union mfi_sgl		*sgl;
	int			error, i;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
	    (u_long)ccb->ccb_data);

	if (!ccb->ccb_data)
		return 1;

	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG)
			printf("more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			printf("error %d loading dma map\n", error);
		return 1;
	}

	hdr = &ccb->ccb_frame->mfr_header;
	sgl = ccb->ccb_sgl;
	sgd = ccb->ccb_dmamap->dm_segs;
	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
		sgl->sg32[i].len = htole32(sgd[i].ds_len);
		DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
		    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
	}

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	/* for 64 bit io make the sizeof a variable to hold whatever sg size */
	ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
	    ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
	    " dm_nsegs: %d extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return 0;
}
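
/*
 * Issue a management (DCMD) frame and wait for it: poll while cold,
 * otherwise post it and sleep until mfi_mgmt_done() marks the CCB done
 * and wakes us up.
 */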
static int
mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
    uint32_t len, void *buf, uint8_t *mbox)
{
	struct mfi_ccb	*ccb;
	int		rv = 1;

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return rv;
	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
	if (rv)
		goto done;

	if (cold) {
		if (mfi_poll(ccb))
			goto done;
	} else {
		mfi_post(sc, ccb);

		DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
		    DEVNAME(sc));
		while (ccb->ccb_state != MFI_CCB_DONE)
			tsleep(ccb, PRIBIO, "mfi_mgmt", 0);

		if (ccb->ccb_flags & MFI_CCB_F_ERR)
			goto done;
	}
	rv = 0;

done:
	mfi_put_ccb(ccb);
	return rv;
}

static int
mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
{
	struct mfi_dcmd_frame *dcmd;

	DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;
	ccb->ccb_xs = xs;
	ccb->ccb_done = mfi_mgmt_done;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (dir != MFI_DATA_NONE) {
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
			return 1;
	}
	return 0;
}

static void
mfi_mgmt_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}
	if (hdr->mfh_cmd_status != MFI_STAT_OK)
		ccb->ccb_flags |= MFI_CCB_F_ERR;

	ccb->ccb_state = MFI_CCB_DONE;
	if (xs) {
		if (hdr->mfh_cmd_status != MFI_STAT_OK) {
			xs->error = XS_DRIVER_STUFFUP;
		} else {
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
	} else
		wakeup(ccb);
}

#if NBIO > 0
int
mfi_ioctl(device_t dev, u_long cmd, void *addr)
{
	struct mfi_softc *sc = device_private(dev);
	int error = 0;
	int s;

	KERNEL_LOCK(1, curlwp);
	s = splbio();

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFI_D_IOCTL, "inq\n");
		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFI_D_IOCTL, "vol\n");
		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFI_D_IOCTL, "disk\n");
		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFI_D_IOCTL, "alarm\n");
		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFI_D_IOCTL, "blink\n");
		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFI_D_IOCTL, "setstate\n");
		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
		error = EINVAL;
	}
	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
	return error;
}

static int
mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
{
	struct mfi_conf	*cfg;
	int		rv = EINVAL;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));

	if (mfi_get_info(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
		    DEVNAME(sc));
		return EIO;
	}

	/* get figures */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;

	rv = 0;
freeme:
	free(cfg, M_DEVBUF);
	return rv;
}

static int
mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
{
	int		i, per, rv = EINVAL;
	uint8_t		mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
		goto done;

	i = bv->bv_volid;
	mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
	    DEVNAME(sc), mbox[0]);

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
		goto done;

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares */
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));

	switch (sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	/*
	 * The RAID levels are determined per the SNIA DDF spec; this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
	    MFI_DDF_SRL_SPANNED)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */

	rv = 0;
done:
	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
	    DEVNAME(sc), rv);
	return rv;
}
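
/*
 * BIOCDISK: walk the firmware config to find the array/span a disk of the
 * given volume belongs to, then query the physical drive details.
 */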
static int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1];
	int			i, rv = EINVAL;
	int			arr, vol, disk;
	uint32_t		size;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    size, cfg, NULL))
		goto freeme;

	ar = cfg->mfc_array;

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	vol = bd->bd_volid;

	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* find corresponding array for ld */
	for (i = 0, arr = 0; i < vol; i++)
		arr += ld[i].mlc_parm.mpa_span_depth;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* offset array index into the next spans */
	arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;

	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
	switch (ar[arr].pd[disk].mar_pd_state) {
	case MFI_PD_UNCONFIG_GOOD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	/* get the remaining fields */
	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
	memset(pd, 0, sizeof(*pd));
	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox))
		goto freeme;

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	memcpy(vend, inqbuf->vendor, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return rv;
}

static int
mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
{
	uint32_t	opc, dir = MFI_DATA_NONE;
	int		rv = 0;
	int8_t		ret;

	switch (ba->ba_opcode) {
	case BIOC_SADISABLE:
		opc = MR_DCMD_SPEAKER_DISABLE;
		break;

	case BIOC_SAENABLE:
		opc = MR_DCMD_SPEAKER_ENABLE;
		break;

	case BIOC_SASILENCE:
		opc = MR_DCMD_SPEAKER_SILENCE;
		break;

	case BIOC_GASTATUS:
		opc = MR_DCMD_SPEAKER_GET;
		dir = MFI_DATA_IN;
		break;

	case BIOC_SATEST:
		opc = MR_DCMD_SPEAKER_TEST;
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
		return EINVAL;
	}

	if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
		rv = EINVAL;
	else
		if (ba->ba_opcode == BIOC_GASTATUS)
			ba->ba_status = ret;
		else
			ba->ba_status = 0;

	return rv;
}

static int
mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
{
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	uint32_t		cmd;
	struct mfi_pd_list	*pd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return EINVAL;

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL))
		goto done;

	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);
	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}

	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}

static int
mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_list	*pd;
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	uint32_t		cmd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL))
		goto done;

	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox[2] = MFI_PD_ONLINE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;

	case BIOC_SSOFFLINE:
		mbox[2] = MFI_PD_OFFLINE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;

	case BIOC_SSHOTSPARE:
		mbox[2] = MFI_PD_HOTSPARE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;
/*
	case BIOC_SSREBUILD:
		cmd = MD_DCMD_PD_REBUILD;
		break;
*/
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}

	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}
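
/*
 * Fill in bioc_vol or bioc_disk information for a hotspare; volume ids
 * beyond the last logical drive index into the hotspare table of the
 * firmware config.
 */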
static int
mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
{
	struct mfi_conf		*cfg;
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	struct bioc_disk	*sdhs;
	struct bioc_vol		*vdhs;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1];
	int			i, rv = EINVAL;
	uint32_t		size;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);

	if (!bio_hs)
		return EINVAL;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    size, cfg, NULL))
		goto freeme;

	/* calculate offset to hs structure */
	hs = (struct mfi_hotspare *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array +
	    cfg->mfc_ld_size * cfg->mfc_no_ld);

	if (volid < cfg->mfc_no_ld)
		goto freeme; /* not a hotspare */

	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
		goto freeme; /* not a hotspare */

	/* offset into hotspare structure */
	i = volid - cfg->mfc_no_ld;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);

	/* get pd fields */
	memset(mbox, 0, sizeof mbox);
	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
		    DEVNAME(sc));
		goto freeme;
	}

	switch (type) {
	case MFI_MGMT_VD:
		vdhs = bio_hs;
		vdhs->bv_status = BIOC_SVONLINE;
		vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
		vdhs->bv_level = -1; /* hotspare */
		vdhs->bv_nodisk = 1;
		break;

	case MFI_MGMT_SD:
		sdhs = bio_hs;
		sdhs->bd_status = BIOC_SDHOTSPARE;
		sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
		sdhs->bd_channel = pd->mpd_enc_idx;
		sdhs->bd_target = pd->mpd_enc_slot;
		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
		memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
		vend[sizeof vend - 1] = '\0';
		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
		break;

	default:
		goto freeme;
	}

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return rv;
}

static int
mfi_destroy_sensors(struct mfi_softc *sc)
{
	if (sc->sc_sme == NULL)
		return 0;
	sysmon_envsys_unregister(sc->sc_sme);
	sc->sc_sme = NULL;
	free(sc->sc_sensor, M_DEVBUF);
	return 0;
}

static int
mfi_create_sensors(struct mfi_softc *sc)
{
	int i;
	int nsensors = sc->sc_ld_cnt;
	int rv;

	sc->sc_sme = sysmon_envsys_create();
	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensor == NULL) {
		aprint_error("%s: can't allocate envsys_data_t\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	for (i = 0; i < nsensors; i++) {
		sc->sc_sensor[i].units = ENVSYS_DRIVE;
		/* Enable monitoring for drive state changes */
		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
		/* logical drives */
		snprintf(sc->sc_sensor[i].desc,
		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
		    DEVNAME(sc), i);
		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensor[i]))
			goto out;
	}

	sc->sc_sme->sme_name = DEVNAME(sc);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = mfi_sensor_refresh;
	rv = sysmon_envsys_register(sc->sc_sme);
	if (rv != 0) {
		aprint_error("%s: unable to register with sysmon (rv = %d)\n",
		    DEVNAME(sc), rv);
		goto out;
	}
	return 0;

out:
	free(sc->sc_sensor, M_DEVBUF);
	sysmon_envsys_destroy(sc->sc_sme);
	sc->sc_sme = NULL;
	return EINVAL;
}
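
/*
 * envsys(4) refresh callback: reuse mfi_ioctl_vol() and map the bio
 * volume status onto drive sensor states.
 */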
static void
mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct mfi_softc	*sc = sme->sme_cookie;
	struct bioc_vol		bv;
	int			s;
	int			error;

	if (edata->sensor >= sc->sc_ld_cnt)
		return;

	memset(&bv, 0, sizeof(bv));
	bv.bv_volid = edata->sensor;
	KERNEL_LOCK(1, curlwp);
	s = splbio();
	error = mfi_ioctl_vol(sc, &bv);
	splx(s);
	KERNEL_UNLOCK_ONE(curlwp);
	if (error)
		return;

	switch (bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_FAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;

	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = 0; /* unknown */
		edata->state = ENVSYS_SINVALID;
	}
}

#endif /* NBIO > 0 */

static uint32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OMSG0);
}

static void
mfi_xscale_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0);
}

static void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}

static int
mfi_xscale_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);
	return 1;
}

static void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}

static uint32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}

static void
mfi_ppc_intr_dis(struct mfi_softc *sc)
{
	/* Taking a wild guess --dyoung */
	mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}

static void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}

static int
mfi_ppc_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);
	return 1;
}

static void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
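
/*
 * gen2 controllers are driven like the ppc ones (firmware state in OSP,
 * doorbell-clear acknowledge, identical post format) but a different
 * outbound-status bit marks a valid interrupt.
 */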
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}

void
mfi_gen2_intr_dis(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, 0xffffffff);
	mfi_write(sc, MFI_ODC, 0xffffffff);
}

void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}

int
mfi_gen2_intr(struct mfi_softc *sc)
{
	u_int32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
		return (0);

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);

	return (1);
}

void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}