/* $NetBSD: mfi.c,v 1.20 2009/01/03 03:43:22 yamt Exp $ */
/* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */
/*
 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.20 2009/01/03 03:43:22 yamt Exp $");

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/ioctl.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm_param.h>

#include <sys/bus.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsi_spc.h>
#include <dev/scsipi/scsipi_disk.h>
#include <dev/scsipi/scsi_disk.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/mfireg.h>
#include <dev/ic/mfivar.h>

#if NBIO > 0
#include <dev/biovar.h>
#endif /* NBIO > 0 */

#ifdef MFI_DEBUG
uint32_t	mfi_debug = 0
/*		| MFI_D_CMD */
/*		| MFI_D_INTR */
/*		| MFI_D_MISC */
/*		| MFI_D_DMA */
		| MFI_D_IOCTL
/*		| MFI_D_RW */
/*		| MFI_D_MEM */
/*		| MFI_D_CCB */
		;
#endif

static void	mfi_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mfiminphys(struct buf *bp);

static struct mfi_ccb	*mfi_get_ccb(struct mfi_softc *);
static void	mfi_put_ccb(struct mfi_ccb *);
static int	mfi_init_ccb(struct mfi_softc *);

static struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
static void	mfi_freemem(struct mfi_softc *, struct mfi_mem *);

static int	mfi_transition_firmware(struct mfi_softc *);
static int	mfi_initialize_firmware(struct mfi_softc *);
static int	mfi_get_info(struct mfi_softc *);
static uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
static void	mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
static int	mfi_poll(struct mfi_ccb *);
static int	mfi_create_sgl(struct mfi_ccb *, int);

/* commands */
static int	mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *);
static int	mfi_scsi_io(struct mfi_ccb *, struct scsipi_xfer *,
		    uint32_t, uint32_t);
static void	mfi_scsi_xs_done(struct mfi_ccb *);
static int	mfi_mgmt_internal(struct mfi_softc *,
		    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static int	mfi_mgmt(struct mfi_ccb *, struct scsipi_xfer *,
		    uint32_t, uint32_t, uint32_t, void *, uint8_t *);
static void	mfi_mgmt_done(struct mfi_ccb *);

#if NBIO > 0
static int	mfi_ioctl(struct device *, u_long, void *);
static int	mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
static int	mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
static int	mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
static int	mfi_ioctl_alarm(struct mfi_softc *,
		    struct bioc_alarm *);
static int	mfi_ioctl_blink(struct mfi_softc *sc,
		    struct bioc_blink *);
static int	mfi_ioctl_setstate(struct mfi_softc *,
		    struct bioc_setstate *);
static int	mfi_bio_hs(struct mfi_softc *, int, int, void *);
static int	mfi_create_sensors(struct mfi_softc *);
static void	mfi_sensor_refresh(struct sysmon_envsys *,
		    envsys_data_t *);
#endif /* NBIO > 0 */

static uint32_t	mfi_xscale_fw_state(struct mfi_softc *sc);
static void	mfi_xscale_intr_ena(struct mfi_softc *sc);
static int	mfi_xscale_intr(struct mfi_softc *sc);
static void	mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post
};

static uint32_t	mfi_ppc_fw_state(struct mfi_softc *sc);
static void	mfi_ppc_intr_ena(struct mfi_softc *sc);
static int	mfi_ppc_intr(struct mfi_softc *sc);
static void	mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post
};

#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))

static struct mfi_ccb *
mfi_get_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	int		s;

	s = splbio();
	ccb = TAILQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = MFI_CCB_READY;
	}
	splx(s);

	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);

	return ccb;
}

static void
mfi_put_ccb(struct mfi_ccb *ccb)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	int			s;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	s = splbio();
	ccb->ccb_state = MFI_CCB_FREE;
	ccb->ccb_xs = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_done = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_frame_size = 0;
	ccb->ccb_extra_frames = 0;
	ccb->ccb_sgl = NULL;
	ccb->ccb_data = NULL;
	ccb->ccb_len = 0;
	TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link);
	splx(s);
}

static int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb	*ccb;
	uint32_t	i;
	int		error;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds,
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		ccb->ccb_sc = sc;

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe,
		    (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense,
		    (u_long)ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(ccb);
	}

	return 0;
destroy:
	/* free dma maps and ccb memory */
	while (i) {
		i--;
		ccb = &sc->sc_ccb[i];
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
	}

	free(sc->sc_ccb, M_DEVBUF);

	return 1;
}

static uint32_t
mfi_read(struct mfi_softc *sc, bus_size_t r)
{
	uint32_t rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), (u_long)r, rv);
	return rv;
}

static void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem	*mm;
	int		nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc),
	    (long)size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return NULL;

	mm->am_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n",
	    mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	memset(mm->am_kva, 0, size);
	return mm;

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF);

	return NULL;
}

static void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF);
}

static int
mfi_transition_firmware(struct mfi_softc *sc)
{
	uint32_t	fw_state, cur_state;
	int		max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return 1;
		case MFI_STATE_WAIT_HANDSHAKE:
			mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfi_write(sc, MFI_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return 1;
		}
	}

	return 0;
}

static int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return 1;

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof *qinfo);
	qinfo->miq_rq_entries = sc->sc_max_cmds + 1;
	qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));
	qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));
	qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = sizeof *qinfo;
	init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE);

	DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n",
	    DEVNAME(sc),
	    qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo,
	    qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo);

	if (mfi_poll(ccb)) {
		printf("%s: mfi_initialize_firmware failed\n", DEVNAME(sc));
		return 1;
	}

	mfi_put_ccb(ccb);

	return 0;
}

static int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL))
		return 1;

#ifdef MFI_DEBUG

	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0lx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return 0;
}

static void
mfiminphys(struct buf *bp)
{
	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);

	/* XXX currently using MFI_MAXFER = MAXPHYS */
	if (bp->b_bcount > MFI_MAXFER)
		bp->b_bcount = MFI_MAXFER;
	minphys(bp);
}

int
mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
{
	struct scsipi_adapter *adapt = &sc->sc_adapt;
	struct scsipi_channel *chan = &sc->sc_chan;
	uint32_t status, frames;
	int i;

	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));

	switch (iop) {
	case MFI_IOP_XSCALE:
		sc->sc_iop = &mfi_iop_xscale;
		break;
	case MFI_IOP_PPC:
		sc->sc_iop = &mfi_iop_ppc;
		break;
	default:
		panic("%s: unknown iop %d", DEVNAME(sc), iop);
	}

	if (mfi_transition_firmware(sc))
		return 1;

	TAILQ_INIT(&sc->sc_ccb_freeq);

	status = mfi_fw_state(sc);
	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
	DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n",
	    DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl);

	/* consumer/producer and reply queue memory */
	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
	    sizeof(struct mfi_prod_cons));
	if (sc->sc_pcq == NULL) {
		aprint_error("%s: unable to allocate reply queue memory\n",
		    DEVNAME(sc));
		goto nopcq;
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* frame memory */
	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
	frames = (sizeof(struct mfi_sg32) * sc->sc_max_sgl +
	    MFI_FRAME_SIZE - 1) / MFI_FRAME_SIZE + 1;
	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
	if (sc->sc_frames == NULL) {
		aprint_error("%s: unable to allocate frame memory\n",
		    DEVNAME(sc));
		goto noframe;
	}
	/* XXX hack, fix this */
	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
		aprint_error("%s: improper frame alignment (%#llx) FIXME\n",
		    DEVNAME(sc), (long long int)MFIMEM_DVA(sc->sc_frames));
		goto noframe;
	}

	/* sense memory */
	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
	if (sc->sc_sense == NULL) {
		aprint_error("%s: unable to allocate sense memory\n",
		    DEVNAME(sc));
		goto nosense;
	}

	/* now that we have all memory bits go initialize ccbs */
	if (mfi_init_ccb(sc)) {
		aprint_error("%s: could not init ccb list\n", DEVNAME(sc));
		goto noinit;
	}

	/* kickstart firmware with all addresses and pointers */
	if (mfi_initialize_firmware(sc)) {
		aprint_error("%s: could not initialize firmware\n",
		    DEVNAME(sc));
		goto noinit;
	}
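	/*
	 * Fetch controller info; the attach banner and the logical drive
	 * bookkeeping below rely on sc_info being populated.
	 */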
	if (mfi_get_info(sc)) {
		aprint_error("%s: could not retrieve controller information\n",
		    DEVNAME(sc));
		goto noinit;
	}

	aprint_normal("%s: logical drives %d, version %s, %dMB RAM\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_package_version,
	    sc->sc_info.mci_memory_size);

	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
	sc->sc_max_ld = sc->sc_ld_cnt;
	for (i = 0; i < sc->sc_ld_cnt; i++)
		sc->sc_ld[i].ld_present = 1;

	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	if (sc->sc_ld_cnt)
		adapt->adapt_openings = sc->sc_max_cmds / sc->sc_ld_cnt;
	else
		adapt->adapt_openings = sc->sc_max_cmds;
	adapt->adapt_max_periph = adapt->adapt_openings;
	adapt->adapt_request = mfi_scsipi_request;
	adapt->adapt_minphys = mfiminphys;

	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = MFI_MAX_LD;
	chan->chan_id = MFI_MAX_LD;

	(void)config_found(&sc->sc_dev, &sc->sc_chan, scsiprint);

	/* enable interrupts */
	mfi_intr_enable(sc);

#if NBIO > 0
	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
		panic("%s: controller registration failed", DEVNAME(sc));
	if (mfi_create_sensors(sc) != 0)
		aprint_error("%s: unable to create sensors\n", DEVNAME(sc));
#endif /* NBIO > 0 */

	return 0;
noinit:
	mfi_freemem(sc, sc->sc_sense);
nosense:
	mfi_freemem(sc, sc->sc_frames);
noframe:
	mfi_freemem(sc, sc->sc_pcq);
nopcq:
	return 1;
}

static int
mfi_poll(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	struct mfi_frame_header *hdr;
	int to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_post(sc, ccb);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

	while (hdr->mfh_cmd_status == 0xff) {
		delay(1000);
		if (to++ > 5000) /* XXX 5 seconds busywait sucks */
			break;
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status == 0xff) {
		printf("%s: timeout on ccb %d\n", DEVNAME(sc),
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}

int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq;
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}

static int
mfi_scsi_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint32_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len = blockcnt;
	io->mif_lba_hi = 0;
	io->mif_lba_lo = blockno;
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

static void
mfi_scsi_xs_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), hdr->mfh_cmd_status);

		if (hdr->mfh_scsi_status != 0) {
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), hdr->mfh_scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}

static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame	*pf;
	struct scsipi_periph	*periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph	*periph;
	struct scsipi_xfer	*xs;
	struct scsipi_adapter	*adapt = chan->chan_adapter;
	struct mfi_softc	*sc = (void *) adapt->adapt_dev;
	struct mfi_ccb		*ccb;
	struct scsi_rw_6	*rw;
	struct scsipi_rw_10	*rwb;
	uint32_t		blockno, blockcnt;
	uint8_t			target;
	uint8_t			mbox[MFI_MBOX_SIZE];
	int			s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
		/* Not supported. */
		return;
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x\n",
	    DEVNAME(sc), req, xs->cmd->opcode);

	periph = xs->xs_periph;
	target = periph->periph_target;

	s = splbio();
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_10:
	case WRITE_10:
		rwb = (struct scsipi_rw_10 *)xs->cmd;
		blockno = _4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	case SCSI_READ_6_COMMAND:
	case SCSI_WRITE_6_COMMAND:
		rw = (struct scsi_rw_6 *)xs->cmd;
		blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff);
		blockcnt = rw->length ? rw->length : 0x100;
		if (mfi_scsi_io(ccb, xs, blockno, blockcnt)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	case SCSI_SYNCHRONIZE_CACHE_10:
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_mgmt(ccb, xs,
		    MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;

	/* hand it off to the firmware and let it deal with it */
	case SCSI_TEST_UNIT_READY:
		/* save off sd? after autoconf */
		if (!cold) /* XXX bogus */
			strlcpy(sc->sc_ld[target].ld_dev, device_xname(&sc->sc_dev),
			    sizeof(sc->sc_ld[target].ld_dev));
		/* FALLTHROUGH */

	default:
		if (mfi_scsi_ld(ccb, xs)) {
			mfi_put_ccb(ccb);
			goto stuffup;
		}
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->xs_control & XS_CTL_POLL) {
		if (mfi_poll(ccb)) {
			/* XXX check for sense in ccb->ccb_sense? */
			printf("%s: mfi_scsipi_request poll failed\n",
			    DEVNAME(sc));
			mfi_put_ccb(ccb);
			bzero(&xs->sense, sizeof(xs->sense));
			xs->sense.scsi_sense.response_code =
			    SSD_RCODE_VALID | SSD_RCODE_CURRENT;
			xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST;
			xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */
			xs->error = XS_SENSE;
			xs->status = SCSI_CHECK;
		} else {
			DNPRINTF(MFI_D_DMA,
			    "%s: mfi_scsipi_request poll complete %d\n",
			    DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs);
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
		splx(s);
		return;
	}

	mfi_post(sc, ccb);

	DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc),
	    ccb->ccb_dmamap->dm_nsegs);

	splx(s);
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsipi_done(xs);
	splx(s);
}

static int
mfi_create_sgl(struct mfi_ccb *ccb, int flags)
{
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr;
	bus_dma_segment_t	*sgd;
	union mfi_sgl		*sgl;
	int			error, i;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc),
	    (u_long)ccb->ccb_data);

	if (!ccb->ccb_data)
		return 1;

	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG)
			printf("more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			printf("error %d loading dma map\n", error);
		return 1;
	}

	hdr = &ccb->ccb_frame->mfr_header;
	sgl = ccb->ccb_sgl;
	sgd = ccb->ccb_dmamap->dm_segs;
	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
		sgl->sg32[i].len = htole32(sgd[i].ds_len);
		DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n",
		    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
	}

	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	/* for 64 bit io make the sizeof a variable to hold whatever sg size */
	ccb->ccb_frame_size += sizeof(struct mfi_sg32) *
	    ccb->ccb_dmamap->dm_nsegs;
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d"
	    " dm_nsegs: %d extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return 0;
}

static int
mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t dir,
    uint32_t len, void *buf, uint8_t *mbox) {
	struct mfi_ccb *ccb;
	int rv = 1;

	if ((ccb = mfi_get_ccb(sc)) == NULL)
		return rv;
	rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox);
	if (rv)
		return rv;

	if (cold) {
		if (mfi_poll(ccb))
			goto done;
	} else {
		mfi_post(sc, ccb);

		DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n",
		    DEVNAME(sc));
		while (ccb->ccb_state != MFI_CCB_DONE)
			tsleep(ccb, PRIBIO, "mfi_mgmt", 0);

		if (ccb->ccb_flags & MFI_CCB_F_ERR)
			goto done;
	}
	rv = 0;

done:
	mfi_put_ccb(ccb);
	return rv;
}

static int
mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs,
    uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
{
	struct mfi_dcmd_frame *dcmd;

	DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc);

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;
	ccb->ccb_xs = xs;
	ccb->ccb_done = mfi_mgmt_done;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (dir != MFI_DATA_NONE) {
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(ccb, BUS_DMA_WAITOK))
			return 1;
	}
	return 0;
}

static void
mfi_mgmt_done(struct mfi_ccb *ccb)
{
	struct scsipi_xfer	*xs = ccb->ccb_xs;
	struct mfi_softc	*sc = ccb->ccb_sc;
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;

	DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	if (hdr->mfh_cmd_status != MFI_STAT_OK)
		ccb->ccb_flags |= MFI_CCB_F_ERR;

	ccb->ccb_state = MFI_CCB_DONE;
	if (xs) {
		if (hdr->mfh_cmd_status != MFI_STAT_OK) {
			xs->error = XS_DRIVER_STUFFUP;
		} else {
			xs->error = XS_NOERROR;
			xs->status = SCSI_OK;
			xs->resid = 0;
		}
		mfi_put_ccb(ccb);
		scsipi_done(xs);
	} else
		wakeup(ccb);
}

#if NBIO > 0
int
mfi_ioctl(struct device *dev, u_long cmd, void *addr)
{
	struct mfi_softc *sc = (struct mfi_softc *)dev;
	int error = 0;
	int s = splbio();

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFI_D_IOCTL, "inq\n");
		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFI_D_IOCTL, "vol\n");
		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFI_D_IOCTL, "disk\n");
		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFI_D_IOCTL, "alarm\n");
		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFI_D_IOCTL, "blink\n");
		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFI_D_IOCTL, "setstate\n");
		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
		error = EINVAL;
	}
	splx(s);

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error);
	return error;
}

static int
mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
{
	struct mfi_conf *cfg;
	int rv = EINVAL;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));

	if (mfi_get_info(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n",
		    DEVNAME(sc));
		return EIO;
	}

	/* get figures */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;

	rv = 0;
freeme:
	free(cfg, M_DEVBUF);
	return rv;
}

static int
mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
{
	int i, per, rv = EINVAL;
	uint8_t mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
		goto done;

	i = bv->bv_volid;
	mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n",
	    DEVNAME(sc), mbox[0]);

	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox))
		goto done;

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares */
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev));

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details.mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid ==
	    MFI_DDF_SRL_SPANNED)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */

	rv = 0;
done:
	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n",
	    DEVNAME(sc), rv);
	return rv;
}

static int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1];
	int			i, rv = EINVAL;
	int			arr, vol, disk;
	uint32_t		size;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    size, cfg, NULL))
		goto freeme;

	ar = cfg->mfc_array;

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	vol = bd->bd_volid;

	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* find corresponding array for ld */
	for (i = 0, arr = 0; i < vol; i++)
		arr += ld[i].mlc_parm.mpa_span_depth;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	/* offset array index into the next spans */
	arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;

	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
	switch (ar[arr].pd[disk].mar_pd_state) {
	case MFI_PD_UNCONFIG_GOOD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;

	}

	/* get the remaining fields */
	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
	memset(pd, 0, sizeof(*pd));
	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox))
		goto freeme;

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
	memcpy(vend, inqbuf->vendor, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return rv;
}

static int
mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
{
	uint32_t opc, dir = MFI_DATA_NONE;
	int rv = 0;
	int8_t ret;

	switch(ba->ba_opcode) {
	case BIOC_SADISABLE:
		opc = MR_DCMD_SPEAKER_DISABLE;
		break;

	case BIOC_SAENABLE:
		opc = MR_DCMD_SPEAKER_ENABLE;
		break;

	case BIOC_SASILENCE:
		opc = MR_DCMD_SPEAKER_SILENCE;
		break;

	case BIOC_GASTATUS:
		opc = MR_DCMD_SPEAKER_GET;
		dir = MFI_DATA_IN;
		break;

	case BIOC_SATEST:
		opc = MR_DCMD_SPEAKER_TEST;
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
		return EINVAL;
	}

	if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL))
		rv = EINVAL;
	else
		if (ba->ba_opcode == BIOC_GASTATUS)
			ba->ba_status = ret;
		else
			ba->ba_status = 0;

	return rv;
}

static int
mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
{
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	uint32_t		cmd;
	struct mfi_pd_list	*pd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return EINVAL;

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL))
		goto done;

	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
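	/* translate the bio blink request into the matching DCMD opcode */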
	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}

static int
mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_list	*pd;
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	uint32_t		cmd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL))
		goto done;

	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox[2] = MFI_PD_ONLINE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;

	case BIOC_SSOFFLINE:
		mbox[2] = MFI_PD_OFFLINE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;

	case BIOC_SSHOTSPARE:
		mbox[2] = MFI_PD_HOTSPARE;
		cmd = MD_DCMD_PD_SET_STATE;
		break;
/*
	case BIOC_SSREBUILD:
		cmd = MD_DCMD_PD_REBUILD;
		break;
*/
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	if (mfi_mgmt_internal(sc, MD_DCMD_PD_SET_STATE, MFI_DATA_NONE,
	    0, NULL, mbox))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}

static int
mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
{
	struct mfi_conf		*cfg;
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	struct bioc_disk	*sdhs;
	struct bioc_vol		*vdhs;
	struct scsipi_inquiry_data *inqbuf;
	char			vend[8+16+4+1];
	int			i, rv = EINVAL;
	uint32_t		size;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);

	if (!bio_hs)
		return EINVAL;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfi_mgmt_internal(sc, MD_DCMD_CONF_GET, MFI_DATA_IN,
	    size, cfg, NULL))
		goto freeme;

	/* calculate offset to hs structure */
	hs = (struct mfi_hotspare *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array +
	    cfg->mfc_ld_size * cfg->mfc_no_ld);

	if (volid < cfg->mfc_no_ld)
		goto freeme; /* not a hotspare */

	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
		goto freeme; /* not a hotspare */

	/* offset into hotspare structure */
	i = volid - cfg->mfc_no_ld;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);

	/* get pd fields */
	memset(mbox, 0, sizeof mbox);
	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
		    DEVNAME(sc));
		goto freeme;
	}

	switch (type) {
	case MFI_MGMT_VD:
		vdhs = bio_hs;
		vdhs->bv_status = BIOC_SVONLINE;
		vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */
		vdhs->bv_level = -1; /* hotspare */
		vdhs->bv_nodisk = 1;
		break;

	case MFI_MGMT_SD:
		sdhs = bio_hs;
		sdhs->bd_status = BIOC_SDHOTSPARE;
		sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */
		sdhs->bd_channel = pd->mpd_enc_idx;
		sdhs->bd_target = pd->mpd_enc_slot;
		inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data;
		memcpy(vend, inqbuf->vendor, sizeof(vend) - 1);
		vend[sizeof vend - 1] = '\0';
		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
		break;

	default:
		goto freeme;
	}

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
	rv = 0;
freeme:
	free(pd, M_DEVBUF);
	free(cfg, M_DEVBUF);

	return rv;
}

static int
mfi_create_sensors(struct mfi_softc *sc)
{
	int i;
	int nsensors = sc->sc_ld_cnt;

	sc->sc_sme = sysmon_envsys_create();
	sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensor == NULL) {
		aprint_error("%s: can't allocate envsys_data_t\n",
		    DEVNAME(sc));
		return ENOMEM;
	}

	for (i = 0; i < nsensors; i++) {
		sc->sc_sensor[i].units = ENVSYS_DRIVE;
		sc->sc_sensor[i].monitor = true;
		/* Enable monitoring for drive state changes */
		sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED;
		/* logical drives */
		snprintf(sc->sc_sensor[i].desc,
		    sizeof(sc->sc_sensor[i].desc), "%s:%d",
		    DEVNAME(sc), i);
		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensor[i]))
			goto out;
	}

	sc->sc_sme->sme_name = DEVNAME(sc);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = mfi_sensor_refresh;
	if (sysmon_envsys_register(sc->sc_sme)) {
		aprint_error("%s: unable to register with sysmon\n",
		    DEVNAME(sc));
		goto out;
	}
	return 0;

out:
	free(sc->sc_sensor, M_DEVBUF);
	sysmon_envsys_destroy(sc->sc_sme);
	return EINVAL;
}

static void
mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata)
{
	struct mfi_softc	*sc = sme->sme_cookie;
	struct bioc_vol		bv;
	int			s;

	if (edata->sensor >= sc->sc_ld_cnt)
		return;

	bzero(&bv, sizeof(bv));
	bv.bv_volid = edata->sensor;
	s = splbio();
	if (mfi_ioctl_vol(sc, &bv)) {
		splx(s);
		return;
	}
	splx(s);

	switch(bv.bv_status) {
	case BIOC_SVOFFLINE:
		edata->value_cur = ENVSYS_DRIVE_FAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVDEGRADED:
		edata->value_cur = ENVSYS_DRIVE_PFAIL;
		edata->state = ENVSYS_SCRITICAL;
		break;

	case BIOC_SVSCRUB:
	case BIOC_SVONLINE:
		edata->value_cur = ENVSYS_DRIVE_ONLINE;
		edata->state = ENVSYS_SVALID;
		break;

	case BIOC_SVINVALID:
		/* FALLTHROUGH */
	default:
		edata->value_cur = 0; /* unknown */
		edata->state = ENVSYS_SINVALID;
	}
}

#endif /* NBIO > 0 */

static uint32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OMSG0);
}

static void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}

static int
mfi_xscale_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_OSTS, status);
	return 1;
}

static void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
	    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
	    MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD);

	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}

static uint32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return mfi_read(sc, MFI_OSP);
}

static void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}

static int
mfi_ppc_intr(struct mfi_softc *sc)
{
	uint32_t status;

	status = mfi_read(sc, MFI_OSTS);
	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
		return 0;

	/* write status back to acknowledge interrupt */
	mfi_write(sc, MFI_ODC, status);
	return 1;
}

static void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}