1 /* $NetBSD: mfi.c,v 1.63 2020/01/07 06:12:09 maxv Exp $ */ 2 /* $OpenBSD: mfi.c,v 1.66 2006/11/28 23:59:45 dlg Exp $ */ 3 4 /* 5 * Copyright (c) 2012 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us> 30 * 31 * Permission to use, copy, modify, and distribute this software for any 32 * purpose with or without fee is hereby granted, provided that the above 33 * copyright notice and this permission notice appear in all copies. 34 * 35 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 36 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 37 * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 38 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 39 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 40 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 41 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 42 */ 43 44 /*- 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 49 * Copyright 1994-2009 The FreeBSD Project. 50 * All rights reserved. 51 * 52 * 1. Redistributions of source code must retain the above copyright 53 * notice, this list of conditions and the following disclaimer. 54 * 2. Redistributions in binary form must reproduce the above copyright 55 * notice, this list of conditions and the following disclaimer in the 56 * documentation and/or other materials provided with the distribution. 57 * 58 * THIS SOFTWARE IS PROVIDED BY THE FREEBSD PROJECT``AS IS'' AND 59 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 60 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 61 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR 62 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 63 * EXEMPLARY,OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 64 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 65 * PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY THEORY 66 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 67 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 68 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 69 * 70 * The views and conclusions contained in the software and documentation 71 * are those of the authors and should not be interpreted as representing 72 * official policies,either expressed or implied, of the FreeBSD Project. 
73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: mfi.c,v 1.63 2020/01/07 06:12:09 maxv Exp $"); 77 78 #include "bio.h" 79 80 #include <sys/param.h> 81 #include <sys/systm.h> 82 #include <sys/buf.h> 83 #include <sys/ioctl.h> 84 #include <sys/device.h> 85 #include <sys/kernel.h> 86 #include <sys/malloc.h> 87 #include <sys/proc.h> 88 #include <sys/cpu.h> 89 #include <sys/conf.h> 90 #include <sys/kauth.h> 91 92 #include <uvm/uvm_param.h> 93 94 #include <sys/bus.h> 95 96 #include <dev/scsipi/scsipi_all.h> 97 #include <dev/scsipi/scsi_all.h> 98 #include <dev/scsipi/scsi_spc.h> 99 #include <dev/scsipi/scsipi_disk.h> 100 #include <dev/scsipi/scsi_disk.h> 101 #include <dev/scsipi/scsiconf.h> 102 103 #include <dev/ic/mfireg.h> 104 #include <dev/ic/mfivar.h> 105 #include <dev/ic/mfiio.h> 106 107 #if NBIO > 0 108 #include <dev/biovar.h> 109 #endif /* NBIO > 0 */ 110 111 #include "ioconf.h" 112 113 #ifdef MFI_DEBUG 114 uint32_t mfi_debug = 0 115 /* | MFI_D_CMD */ 116 /* | MFI_D_INTR */ 117 /* | MFI_D_MISC */ 118 /* | MFI_D_DMA */ 119 /* | MFI_D_IOCTL */ 120 /* | MFI_D_RW */ 121 /* | MFI_D_MEM */ 122 /* | MFI_D_CCB */ 123 /* | MFI_D_SYNC */ 124 ; 125 #endif 126 127 static void mfi_scsipi_request(struct scsipi_channel *, 128 scsipi_adapter_req_t, void *); 129 static void mfiminphys(struct buf *bp); 130 131 static struct mfi_ccb *mfi_get_ccb(struct mfi_softc *); 132 static void mfi_put_ccb(struct mfi_ccb *); 133 static int mfi_init_ccb(struct mfi_softc *); 134 135 static struct mfi_mem *mfi_allocmem(struct mfi_softc *, size_t); 136 static void mfi_freemem(struct mfi_softc *, struct mfi_mem **); 137 138 static int mfi_transition_firmware(struct mfi_softc *); 139 static int mfi_initialize_firmware(struct mfi_softc *); 140 static int mfi_get_info(struct mfi_softc *); 141 static int mfi_get_bbu(struct mfi_softc *, 142 struct mfi_bbu_status *); 143 /* return codes for mfi_get_bbu */ 144 #define MFI_BBU_GOOD 0 145 #define MFI_BBU_BAD 1 146 #define MFI_BBU_UNKNOWN 2 147 
static uint32_t mfi_read(struct mfi_softc *, bus_size_t); 148 static void mfi_write(struct mfi_softc *, bus_size_t, uint32_t); 149 static int mfi_poll(struct mfi_ccb *); 150 static int mfi_create_sgl(struct mfi_ccb *, int); 151 152 /* commands */ 153 static int mfi_scsi_ld(struct mfi_ccb *, struct scsipi_xfer *); 154 static int mfi_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *, 155 uint64_t, uint32_t); 156 static void mfi_scsi_ld_done(struct mfi_ccb *); 157 static void mfi_scsi_xs_done(struct mfi_ccb *, int, int); 158 static int mfi_mgmt_internal(struct mfi_softc *, uint32_t, 159 uint32_t, uint32_t, void *, uint8_t *, bool); 160 static int mfi_mgmt(struct mfi_ccb *,struct scsipi_xfer *, 161 uint32_t, uint32_t, uint32_t, void *, uint8_t *); 162 static void mfi_mgmt_done(struct mfi_ccb *); 163 164 #if NBIO > 0 165 static int mfi_ioctl(device_t, u_long, void *); 166 static int mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *); 167 static int mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *); 168 static int mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *); 169 static int mfi_ioctl_alarm(struct mfi_softc *, 170 struct bioc_alarm *); 171 static int mfi_ioctl_blink(struct mfi_softc *sc, 172 struct bioc_blink *); 173 static int mfi_ioctl_setstate(struct mfi_softc *, 174 struct bioc_setstate *); 175 static int mfi_bio_hs(struct mfi_softc *, int, int, void *); 176 static int mfi_create_sensors(struct mfi_softc *); 177 static int mfi_destroy_sensors(struct mfi_softc *); 178 static void mfi_sensor_refresh(struct sysmon_envsys *, 179 envsys_data_t *); 180 #endif /* NBIO > 0 */ 181 static bool mfi_shutdown(device_t, int); 182 static bool mfi_suspend(device_t, const pmf_qual_t *); 183 static bool mfi_resume(device_t, const pmf_qual_t *); 184 185 static dev_type_open(mfifopen); 186 static dev_type_close(mfifclose); 187 static dev_type_ioctl(mfifioctl); 188 const struct cdevsw mfi_cdevsw = { 189 .d_open = mfifopen, 190 .d_close = mfifclose, 191 .d_read = noread, 192 
.d_write = nowrite, 193 .d_ioctl = mfifioctl, 194 .d_stop = nostop, 195 .d_tty = notty, 196 .d_poll = nopoll, 197 .d_mmap = nommap, 198 .d_kqfilter = nokqfilter, 199 .d_discard = nodiscard, 200 .d_flag = D_OTHER 201 }; 202 203 static uint32_t mfi_xscale_fw_state(struct mfi_softc *sc); 204 static void mfi_xscale_intr_ena(struct mfi_softc *sc); 205 static void mfi_xscale_intr_dis(struct mfi_softc *sc); 206 static int mfi_xscale_intr(struct mfi_softc *sc); 207 static void mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb); 208 209 static const struct mfi_iop_ops mfi_iop_xscale = { 210 mfi_xscale_fw_state, 211 mfi_xscale_intr_dis, 212 mfi_xscale_intr_ena, 213 mfi_xscale_intr, 214 mfi_xscale_post, 215 mfi_scsi_ld_io, 216 }; 217 218 static uint32_t mfi_ppc_fw_state(struct mfi_softc *sc); 219 static void mfi_ppc_intr_ena(struct mfi_softc *sc); 220 static void mfi_ppc_intr_dis(struct mfi_softc *sc); 221 static int mfi_ppc_intr(struct mfi_softc *sc); 222 static void mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb); 223 224 static const struct mfi_iop_ops mfi_iop_ppc = { 225 mfi_ppc_fw_state, 226 mfi_ppc_intr_dis, 227 mfi_ppc_intr_ena, 228 mfi_ppc_intr, 229 mfi_ppc_post, 230 mfi_scsi_ld_io, 231 }; 232 233 uint32_t mfi_gen2_fw_state(struct mfi_softc *sc); 234 void mfi_gen2_intr_ena(struct mfi_softc *sc); 235 void mfi_gen2_intr_dis(struct mfi_softc *sc); 236 int mfi_gen2_intr(struct mfi_softc *sc); 237 void mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb); 238 239 static const struct mfi_iop_ops mfi_iop_gen2 = { 240 mfi_gen2_fw_state, 241 mfi_gen2_intr_dis, 242 mfi_gen2_intr_ena, 243 mfi_gen2_intr, 244 mfi_gen2_post, 245 mfi_scsi_ld_io, 246 }; 247 248 u_int32_t mfi_skinny_fw_state(struct mfi_softc *); 249 void mfi_skinny_intr_dis(struct mfi_softc *); 250 void mfi_skinny_intr_ena(struct mfi_softc *); 251 int mfi_skinny_intr(struct mfi_softc *); 252 void mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *); 253 254 static const struct mfi_iop_ops 
mfi_iop_skinny = { 255 mfi_skinny_fw_state, 256 mfi_skinny_intr_dis, 257 mfi_skinny_intr_ena, 258 mfi_skinny_intr, 259 mfi_skinny_post, 260 mfi_scsi_ld_io, 261 }; 262 263 static int mfi_tbolt_init_desc_pool(struct mfi_softc *); 264 static int mfi_tbolt_init_MFI_queue(struct mfi_softc *); 265 static void mfi_tbolt_build_mpt_ccb(struct mfi_ccb *); 266 int mfi_tbolt_scsi_ld_io(struct mfi_ccb *, struct scsipi_xfer *, 267 uint64_t, uint32_t); 268 static void mfi_tbolt_scsi_ld_done(struct mfi_ccb *); 269 static int mfi_tbolt_create_sgl(struct mfi_ccb *, int); 270 void mfi_tbolt_sync_map_info(struct work *, void *); 271 static void mfi_sync_map_complete(struct mfi_ccb *); 272 273 u_int32_t mfi_tbolt_fw_state(struct mfi_softc *); 274 void mfi_tbolt_intr_dis(struct mfi_softc *); 275 void mfi_tbolt_intr_ena(struct mfi_softc *); 276 int mfi_tbolt_intr(struct mfi_softc *sc); 277 void mfi_tbolt_post(struct mfi_softc *, struct mfi_ccb *); 278 279 static const struct mfi_iop_ops mfi_iop_tbolt = { 280 mfi_tbolt_fw_state, 281 mfi_tbolt_intr_dis, 282 mfi_tbolt_intr_ena, 283 mfi_tbolt_intr, 284 mfi_tbolt_post, 285 mfi_tbolt_scsi_ld_io, 286 }; 287 288 #define mfi_fw_state(_s) ((_s)->sc_iop->mio_fw_state(_s)) 289 #define mfi_intr_enable(_s) ((_s)->sc_iop->mio_intr_ena(_s)) 290 #define mfi_intr_disable(_s) ((_s)->sc_iop->mio_intr_dis(_s)) 291 #define mfi_my_intr(_s) ((_s)->sc_iop->mio_intr(_s)) 292 #define mfi_post(_s, _c) ((_s)->sc_iop->mio_post((_s), (_c))) 293 294 static struct mfi_ccb * 295 mfi_get_ccb(struct mfi_softc *sc) 296 { 297 struct mfi_ccb *ccb; 298 int s; 299 300 s = splbio(); 301 ccb = TAILQ_FIRST(&sc->sc_ccb_freeq); 302 if (ccb) { 303 TAILQ_REMOVE(&sc->sc_ccb_freeq, ccb, ccb_link); 304 ccb->ccb_state = MFI_CCB_READY; 305 } 306 splx(s); 307 308 DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb); 309 if (__predict_false(ccb == NULL && sc->sc_running)) 310 aprint_error_dev(sc->sc_dev, "out of ccb\n"); 311 312 return ccb; 313 } 314 315 static void 316 
mfi_put_ccb(struct mfi_ccb *ccb) 317 { 318 struct mfi_softc *sc = ccb->ccb_sc; 319 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; 320 int s; 321 322 DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb); 323 324 hdr->mfh_cmd_status = 0x0; 325 hdr->mfh_flags = 0x0; 326 ccb->ccb_state = MFI_CCB_FREE; 327 ccb->ccb_xs = NULL; 328 ccb->ccb_flags = 0; 329 ccb->ccb_done = NULL; 330 ccb->ccb_direction = 0; 331 ccb->ccb_frame_size = 0; 332 ccb->ccb_extra_frames = 0; 333 ccb->ccb_sgl = NULL; 334 ccb->ccb_data = NULL; 335 ccb->ccb_len = 0; 336 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 337 /* erase tb_request_desc but preserve SMID */ 338 int index = ccb->ccb_tb_request_desc.header.SMID; 339 ccb->ccb_tb_request_desc.words = 0; 340 ccb->ccb_tb_request_desc.header.SMID = index; 341 } 342 s = splbio(); 343 TAILQ_INSERT_TAIL(&sc->sc_ccb_freeq, ccb, ccb_link); 344 splx(s); 345 } 346 347 static int 348 mfi_destroy_ccb(struct mfi_softc *sc) 349 { 350 struct mfi_ccb *ccb; 351 uint32_t i; 352 353 DNPRINTF(MFI_D_CCB, "%s: mfi_destroy_ccb\n", DEVNAME(sc)); 354 355 356 for (i = 0; (ccb = mfi_get_ccb(sc)) != NULL; i++) { 357 /* create a dma map for transfer */ 358 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap); 359 } 360 361 if (i < sc->sc_max_cmds) 362 return EBUSY; 363 364 free(sc->sc_ccb, M_DEVBUF); 365 366 return 0; 367 } 368 369 static int 370 mfi_init_ccb(struct mfi_softc *sc) 371 { 372 struct mfi_ccb *ccb; 373 uint32_t i; 374 int error; 375 bus_addr_t io_req_base_phys; 376 uint8_t *io_req_base; 377 int offset; 378 379 DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc)); 380 381 sc->sc_ccb = malloc(sizeof(struct mfi_ccb) * sc->sc_max_cmds, 382 M_DEVBUF, M_WAITOK|M_ZERO); 383 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 384 /* 385 * The first 256 bytes (SMID 0) is not used. 386 * Don't add to the cmd list. 
387 */ 388 io_req_base = (uint8_t *)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool) + 389 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE; 390 io_req_base_phys = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + 391 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE; 392 } else { 393 io_req_base = NULL; /* XXX: gcc */ 394 io_req_base_phys = 0; /* XXX: gcc */ 395 } 396 397 for (i = 0; i < sc->sc_max_cmds; i++) { 398 ccb = &sc->sc_ccb[i]; 399 400 ccb->ccb_sc = sc; 401 402 /* select i'th frame */ 403 ccb->ccb_frame = (union mfi_frame *) 404 ((char*)MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i); 405 ccb->ccb_pframe = 406 MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i; 407 ccb->ccb_frame->mfr_header.mfh_context = i; 408 409 /* select i'th sense */ 410 ccb->ccb_sense = (struct mfi_sense *) 411 ((char*)MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i); 412 ccb->ccb_psense = 413 (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i); 414 415 /* create a dma map for transfer */ 416 error = bus_dmamap_create(sc->sc_datadmat, 417 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0, 418 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap); 419 if (error) { 420 aprint_error_dev(sc->sc_dev, 421 "cannot create ccb dmamap (%d)\n", error); 422 goto destroy; 423 } 424 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 425 offset = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * i; 426 ccb->ccb_tb_io_request = 427 (struct mfi_mpi2_request_raid_scsi_io *) 428 (io_req_base + offset); 429 ccb->ccb_tb_pio_request = 430 io_req_base_phys + offset; 431 offset = MEGASAS_MAX_SZ_CHAIN_FRAME * i; 432 ccb->ccb_tb_sg_frame = 433 (mpi2_sge_io_union *)(sc->sc_reply_pool_limit + 434 offset); 435 ccb->ccb_tb_psg_frame = sc->sc_sg_frame_busaddr + 436 offset; 437 /* SMID 0 is reserved. 
Set SMID/index from 1 */ 438 ccb->ccb_tb_request_desc.header.SMID = i + 1; 439 } 440 441 DNPRINTF(MFI_D_CCB, 442 "ccb(%d): %p frame: %#lx (%#lx) sense: %#lx (%#lx) map: %#lx\n", 443 ccb->ccb_frame->mfr_header.mfh_context, ccb, 444 (u_long)ccb->ccb_frame, (u_long)ccb->ccb_pframe, 445 (u_long)ccb->ccb_sense, (u_long)ccb->ccb_psense, 446 (u_long)ccb->ccb_dmamap); 447 448 /* add ccb to queue */ 449 mfi_put_ccb(ccb); 450 } 451 452 return 0; 453 destroy: 454 /* free dma maps and ccb memory */ 455 while (i) { 456 i--; 457 ccb = &sc->sc_ccb[i]; 458 bus_dmamap_destroy(sc->sc_datadmat, ccb->ccb_dmamap); 459 } 460 461 free(sc->sc_ccb, M_DEVBUF); 462 463 return 1; 464 } 465 466 static uint32_t 467 mfi_read(struct mfi_softc *sc, bus_size_t r) 468 { 469 uint32_t rv; 470 471 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 472 BUS_SPACE_BARRIER_READ); 473 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 474 475 DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x08%x ", DEVNAME(sc), (u_long)r, rv); 476 return rv; 477 } 478 479 static void 480 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v) 481 { 482 DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), (u_long)r, v); 483 484 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 485 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 486 BUS_SPACE_BARRIER_WRITE); 487 } 488 489 static struct mfi_mem * 490 mfi_allocmem(struct mfi_softc *sc, size_t size) 491 { 492 struct mfi_mem *mm; 493 int nsegs; 494 495 DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %ld\n", DEVNAME(sc), 496 (long)size); 497 498 mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_WAITOK|M_ZERO); 499 mm->am_size = size; 500 501 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 502 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0) 503 goto amfree; 504 505 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1, 506 &nsegs, BUS_DMA_NOWAIT) != 0) 507 goto destroy; 508 509 if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva, 510 BUS_DMA_NOWAIT) != 0) 511 goto 
free; 512 513 if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL, 514 BUS_DMA_NOWAIT) != 0) 515 goto unmap; 516 517 DNPRINTF(MFI_D_MEM, " kva: %p dva: %p map: %p\n", 518 mm->am_kva, (void *)mm->am_map->dm_segs[0].ds_addr, mm->am_map); 519 520 memset(mm->am_kva, 0, size); 521 return mm; 522 523 unmap: 524 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size); 525 free: 526 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1); 527 destroy: 528 bus_dmamap_destroy(sc->sc_dmat, mm->am_map); 529 amfree: 530 free(mm, M_DEVBUF); 531 532 return NULL; 533 } 534 535 static void 536 mfi_freemem(struct mfi_softc *sc, struct mfi_mem **mmp) 537 { 538 struct mfi_mem *mm = *mmp; 539 540 if (mm == NULL) 541 return; 542 543 *mmp = NULL; 544 545 DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm); 546 547 bus_dmamap_unload(sc->sc_dmat, mm->am_map); 548 bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size); 549 bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1); 550 bus_dmamap_destroy(sc->sc_dmat, mm->am_map); 551 free(mm, M_DEVBUF); 552 } 553 554 static int 555 mfi_transition_firmware(struct mfi_softc *sc) 556 { 557 uint32_t fw_state, cur_state; 558 int max_wait, i; 559 560 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK; 561 562 DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc), 563 fw_state); 564 565 while (fw_state != MFI_STATE_READY) { 566 DNPRINTF(MFI_D_MISC, 567 "%s: waiting for firmware to become ready\n", 568 DEVNAME(sc)); 569 cur_state = fw_state; 570 switch (fw_state) { 571 case MFI_STATE_FAULT: 572 aprint_error_dev(sc->sc_dev, "firmware fault\n"); 573 return 1; 574 case MFI_STATE_WAIT_HANDSHAKE: 575 if (sc->sc_ioptype == MFI_IOP_SKINNY || 576 sc->sc_ioptype == MFI_IOP_TBOLT) 577 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_CLEAR_HANDSHAKE); 578 else 579 mfi_write(sc, MFI_IDB, MFI_INIT_CLEAR_HANDSHAKE); 580 max_wait = 2; 581 break; 582 case MFI_STATE_OPERATIONAL: 583 if (sc->sc_ioptype == MFI_IOP_SKINNY || 584 sc->sc_ioptype == MFI_IOP_TBOLT) 585 
mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY); 586 else 587 mfi_write(sc, MFI_IDB, MFI_INIT_READY); 588 max_wait = 10; 589 break; 590 case MFI_STATE_UNDEFINED: 591 case MFI_STATE_BB_INIT: 592 max_wait = 2; 593 break; 594 case MFI_STATE_FW_INIT: 595 case MFI_STATE_DEVICE_SCAN: 596 case MFI_STATE_FLUSH_CACHE: 597 max_wait = 20; 598 break; 599 case MFI_STATE_BOOT_MESSAGE_PENDING: 600 if (sc->sc_ioptype == MFI_IOP_SKINNY || 601 sc->sc_ioptype == MFI_IOP_TBOLT) { 602 mfi_write(sc, MFI_SKINNY_IDB, MFI_INIT_HOTPLUG); 603 } else { 604 mfi_write(sc, MFI_IDB, MFI_INIT_HOTPLUG); 605 } 606 max_wait = 180; 607 break; 608 default: 609 aprint_error_dev(sc->sc_dev, 610 "unknown firmware state %d\n", fw_state); 611 return 1; 612 } 613 for (i = 0; i < (max_wait * 10); i++) { 614 fw_state = mfi_fw_state(sc) & MFI_STATE_MASK; 615 if (fw_state == cur_state) 616 DELAY(100000); 617 else 618 break; 619 } 620 if (fw_state == cur_state) { 621 aprint_error_dev(sc->sc_dev, 622 "firmware stuck in state %#x\n", fw_state); 623 return 1; 624 } 625 } 626 627 return 0; 628 } 629 630 static int 631 mfi_initialize_firmware(struct mfi_softc *sc) 632 { 633 struct mfi_ccb *ccb; 634 struct mfi_init_frame *init; 635 struct mfi_init_qinfo *qinfo; 636 637 DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc)); 638 639 if ((ccb = mfi_get_ccb(sc)) == NULL) 640 return 1; 641 642 init = &ccb->ccb_frame->mfr_init; 643 qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE); 644 645 memset(qinfo, 0, sizeof *qinfo); 646 qinfo->miq_rq_entries = sc->sc_max_cmds + 1; 647 qinfo->miq_rq_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) + 648 offsetof(struct mfi_prod_cons, mpc_reply_q)); 649 qinfo->miq_pi_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) + 650 offsetof(struct mfi_prod_cons, mpc_producer)); 651 qinfo->miq_ci_addr_lo = htole32(MFIMEM_DVA(sc->sc_pcq) + 652 offsetof(struct mfi_prod_cons, mpc_consumer)); 653 654 init->mif_header.mfh_cmd = MFI_CMD_INIT; 655 init->mif_header.mfh_data_len = sizeof *qinfo; 
656 init->mif_qinfo_new_addr_lo = htole32(ccb->ccb_pframe + MFI_FRAME_SIZE); 657 658 DNPRINTF(MFI_D_MISC, "%s: entries: %#x rq: %#x pi: %#x ci: %#x\n", 659 DEVNAME(sc), 660 qinfo->miq_rq_entries, qinfo->miq_rq_addr_lo, 661 qinfo->miq_pi_addr_lo, qinfo->miq_ci_addr_lo); 662 663 if (mfi_poll(ccb)) { 664 aprint_error_dev(sc->sc_dev, 665 "mfi_initialize_firmware failed\n"); 666 return 1; 667 } 668 669 mfi_put_ccb(ccb); 670 671 return 0; 672 } 673 674 static int 675 mfi_get_info(struct mfi_softc *sc) 676 { 677 #ifdef MFI_DEBUG 678 int i; 679 #endif 680 DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc)); 681 682 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN, 683 sizeof(sc->sc_info), &sc->sc_info, NULL, cold ? true : false)) 684 return 1; 685 686 #ifdef MFI_DEBUG 687 688 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) { 689 printf("%s: active FW %s Version %s date %s time %s\n", 690 DEVNAME(sc), 691 sc->sc_info.mci_image_component[i].mic_name, 692 sc->sc_info.mci_image_component[i].mic_version, 693 sc->sc_info.mci_image_component[i].mic_build_date, 694 sc->sc_info.mci_image_component[i].mic_build_time); 695 } 696 697 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) { 698 printf("%s: pending FW %s Version %s date %s time %s\n", 699 DEVNAME(sc), 700 sc->sc_info.mci_pending_image_component[i].mic_name, 701 sc->sc_info.mci_pending_image_component[i].mic_version, 702 sc->sc_info.mci_pending_image_component[i].mic_build_date, 703 sc->sc_info.mci_pending_image_component[i].mic_build_time); 704 } 705 706 printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n", 707 DEVNAME(sc), 708 sc->sc_info.mci_max_arms, 709 sc->sc_info.mci_max_spans, 710 sc->sc_info.mci_max_arrays, 711 sc->sc_info.mci_max_lds, 712 sc->sc_info.mci_product_name); 713 714 printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n", 715 DEVNAME(sc), 716 sc->sc_info.mci_serial_number, 717 sc->sc_info.mci_hw_present, 718 
sc->sc_info.mci_current_fw_time, 719 sc->sc_info.mci_max_cmds, 720 sc->sc_info.mci_max_sg_elements); 721 722 printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n", 723 DEVNAME(sc), 724 sc->sc_info.mci_max_request_size, 725 sc->sc_info.mci_lds_present, 726 sc->sc_info.mci_lds_degraded, 727 sc->sc_info.mci_lds_offline, 728 sc->sc_info.mci_pd_present); 729 730 printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n", 731 DEVNAME(sc), 732 sc->sc_info.mci_pd_disks_present, 733 sc->sc_info.mci_pd_disks_pred_failure, 734 sc->sc_info.mci_pd_disks_failed); 735 736 printf("%s: nvram %d mem %d flash %d\n", 737 DEVNAME(sc), 738 sc->sc_info.mci_nvram_size, 739 sc->sc_info.mci_memory_size, 740 sc->sc_info.mci_flash_size); 741 742 printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n", 743 DEVNAME(sc), 744 sc->sc_info.mci_ram_correctable_errors, 745 sc->sc_info.mci_ram_uncorrectable_errors, 746 sc->sc_info.mci_cluster_allowed, 747 sc->sc_info.mci_cluster_active); 748 749 printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n", 750 DEVNAME(sc), 751 sc->sc_info.mci_max_strips_per_io, 752 sc->sc_info.mci_raid_levels, 753 sc->sc_info.mci_adapter_ops, 754 sc->sc_info.mci_ld_ops); 755 756 printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n", 757 DEVNAME(sc), 758 sc->sc_info.mci_stripe_sz_ops.min, 759 sc->sc_info.mci_stripe_sz_ops.max, 760 sc->sc_info.mci_pd_ops, 761 sc->sc_info.mci_pd_mix_support); 762 763 printf("%s: ecc_bucket %d pckg_prop %s\n", 764 DEVNAME(sc), 765 sc->sc_info.mci_ecc_bucket_count, 766 sc->sc_info.mci_package_version); 767 768 printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n", 769 DEVNAME(sc), 770 sc->sc_info.mci_properties.mcp_seq_num, 771 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval, 772 sc->sc_info.mci_properties.mcp_intr_throttle_cnt, 773 sc->sc_info.mci_properties.mcp_intr_throttle_timeout); 774 775 printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n", 776 
DEVNAME(sc), 777 sc->sc_info.mci_properties.mcp_rebuild_rate, 778 sc->sc_info.mci_properties.mcp_patrol_read_rate, 779 sc->sc_info.mci_properties.mcp_bgi_rate, 780 sc->sc_info.mci_properties.mcp_cc_rate); 781 782 printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n", 783 DEVNAME(sc), 784 sc->sc_info.mci_properties.mcp_recon_rate, 785 sc->sc_info.mci_properties.mcp_cache_flush_interval, 786 sc->sc_info.mci_properties.mcp_spinup_drv_cnt, 787 sc->sc_info.mci_properties.mcp_spinup_delay, 788 sc->sc_info.mci_properties.mcp_cluster_enable); 789 790 printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n", 791 DEVNAME(sc), 792 sc->sc_info.mci_properties.mcp_coercion_mode, 793 sc->sc_info.mci_properties.mcp_alarm_enable, 794 sc->sc_info.mci_properties.mcp_disable_auto_rebuild, 795 sc->sc_info.mci_properties.mcp_disable_battery_warn, 796 sc->sc_info.mci_properties.mcp_ecc_bucket_size); 797 798 printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n", 799 DEVNAME(sc), 800 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate, 801 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion, 802 sc->sc_info.mci_properties.mcp_expose_encl_devices); 803 804 printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n", 805 DEVNAME(sc), 806 sc->sc_info.mci_pci.mip_vendor, 807 sc->sc_info.mci_pci.mip_device, 808 sc->sc_info.mci_pci.mip_subvendor, 809 sc->sc_info.mci_pci.mip_subdevice); 810 811 printf("%s: type %#x port_count %d port_addr ", 812 DEVNAME(sc), 813 sc->sc_info.mci_host.mih_type, 814 sc->sc_info.mci_host.mih_port_count); 815 816 for (i = 0; i < 8; i++) 817 printf("%.0" PRIx64 " ", sc->sc_info.mci_host.mih_port_addr[i]); 818 printf("\n"); 819 820 printf("%s: type %.x port_count %d port_addr ", 821 DEVNAME(sc), 822 sc->sc_info.mci_device.mid_type, 823 sc->sc_info.mci_device.mid_port_count); 824 825 for (i = 0; i < 8; i++) { 826 printf("%.0" PRIx64 " ", 827 sc->sc_info.mci_device.mid_port_addr[i]); 828 } 829 printf("\n"); 830 #endif /* 
MFI_DEBUG */ 831 832 return 0; 833 } 834 835 static int 836 mfi_get_bbu(struct mfi_softc *sc, struct mfi_bbu_status *stat) 837 { 838 DNPRINTF(MFI_D_MISC, "%s: mfi_get_bbu\n", DEVNAME(sc)); 839 840 if (mfi_mgmt_internal(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN, 841 sizeof(*stat), stat, NULL, cold ? true : false)) 842 return MFI_BBU_UNKNOWN; 843 #ifdef MFI_DEBUG 844 printf("bbu type %d, voltage %d, current %d, temperature %d, " 845 "status 0x%x\n", stat->battery_type, stat->voltage, stat->current, 846 stat->temperature, stat->fw_status); 847 printf("details: "); 848 switch(stat->battery_type) { 849 case MFI_BBU_TYPE_IBBU: 850 printf("guage %d relative charge %d charger state %d " 851 "charger ctrl %d\n", stat->detail.ibbu.gas_guage_status, 852 stat->detail.ibbu.relative_charge , 853 stat->detail.ibbu.charger_system_state , 854 stat->detail.ibbu.charger_system_ctrl); 855 printf("\tcurrent %d abs charge %d max error %d\n", 856 stat->detail.ibbu.charging_current , 857 stat->detail.ibbu.absolute_charge , 858 stat->detail.ibbu.max_error); 859 break; 860 case MFI_BBU_TYPE_BBU: 861 printf("guage %d relative charge %d charger state %d\n", 862 stat->detail.ibbu.gas_guage_status, 863 stat->detail.bbu.relative_charge , 864 stat->detail.bbu.charger_status ); 865 printf("\trem capacity %d fyll capacity %d SOH %d\n", 866 stat->detail.bbu.remaining_capacity , 867 stat->detail.bbu.full_charge_capacity , 868 stat->detail.bbu.is_SOH_good); 869 break; 870 default: 871 printf("\n"); 872 } 873 #endif 874 switch(stat->battery_type) { 875 case MFI_BBU_TYPE_BBU: 876 return (stat->detail.bbu.is_SOH_good ? 
877 MFI_BBU_GOOD : MFI_BBU_BAD); 878 case MFI_BBU_TYPE_NONE: 879 return MFI_BBU_UNKNOWN; 880 default: 881 if (stat->fw_status & 882 (MFI_BBU_STATE_PACK_MISSING | 883 MFI_BBU_STATE_VOLTAGE_LOW | 884 MFI_BBU_STATE_TEMPERATURE_HIGH | 885 MFI_BBU_STATE_LEARN_CYC_FAIL | 886 MFI_BBU_STATE_LEARN_CYC_TIMEOUT | 887 MFI_BBU_STATE_I2C_ERR_DETECT)) 888 return MFI_BBU_BAD; 889 return MFI_BBU_GOOD; 890 } 891 } 892 893 static void 894 mfiminphys(struct buf *bp) 895 { 896 DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount); 897 898 /* XXX currently using MFI_MAXFER = MAXPHYS */ 899 if (bp->b_bcount > MFI_MAXFER) 900 bp->b_bcount = MFI_MAXFER; 901 minphys(bp); 902 } 903 904 int 905 mfi_rescan(device_t self, const char *ifattr, const int *locators) 906 { 907 struct mfi_softc *sc = device_private(self); 908 909 if (sc->sc_child != NULL) 910 return 0; 911 912 sc->sc_child = config_found_sm_loc(self, ifattr, locators, &sc->sc_chan, 913 scsiprint, NULL); 914 915 return 0; 916 } 917 918 void 919 mfi_childdetached(device_t self, device_t child) 920 { 921 struct mfi_softc *sc = device_private(self); 922 923 KASSERT(self == sc->sc_dev); 924 KASSERT(child == sc->sc_child); 925 926 if (child == sc->sc_child) 927 sc->sc_child = NULL; 928 } 929 930 int 931 mfi_detach(struct mfi_softc *sc, int flags) 932 { 933 int error; 934 935 DNPRINTF(MFI_D_MISC, "%s: mfi_detach\n", DEVNAME(sc)); 936 937 if ((error = config_detach_children(sc->sc_dev, flags)) != 0) 938 return error; 939 940 #if NBIO > 0 941 mfi_destroy_sensors(sc); 942 bio_unregister(sc->sc_dev); 943 #endif /* NBIO > 0 */ 944 945 mfi_intr_disable(sc); 946 mfi_shutdown(sc->sc_dev, 0); 947 948 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 949 workqueue_destroy(sc->sc_ldsync_wq); 950 mfi_put_ccb(sc->sc_ldsync_ccb); 951 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool); 952 mfi_freemem(sc, &sc->sc_tbolt_ioc_init); 953 mfi_freemem(sc, &sc->sc_tbolt_verbuf); 954 } 955 956 if ((error = mfi_destroy_ccb(sc)) != 0) 957 return error; 958 959 mfi_freemem(sc, 
&sc->sc_sense); 960 961 mfi_freemem(sc, &sc->sc_frames); 962 963 mfi_freemem(sc, &sc->sc_pcq); 964 965 return 0; 966 } 967 968 static bool 969 mfi_shutdown(device_t dev, int how) 970 { 971 struct mfi_softc *sc = device_private(dev); 972 uint8_t mbox[MFI_MBOX_SIZE]; 973 int s = splbio(); 974 DNPRINTF(MFI_D_MISC, "%s: mfi_shutdown\n", DEVNAME(sc)); 975 if (sc->sc_running) { 976 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 977 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_CACHE_FLUSH, 978 MFI_DATA_NONE, 0, NULL, mbox, true)) { 979 aprint_error_dev(dev, "shutdown: cache flush failed\n"); 980 goto fail; 981 } 982 983 mbox[0] = 0; 984 if (mfi_mgmt_internal(sc, MR_DCMD_CTRL_SHUTDOWN, 985 MFI_DATA_NONE, 0, NULL, mbox, true)) { 986 aprint_error_dev(dev, "shutdown: " 987 "firmware shutdown failed\n"); 988 goto fail; 989 } 990 sc->sc_running = false; 991 } 992 splx(s); 993 return true; 994 fail: 995 splx(s); 996 return false; 997 } 998 999 static bool 1000 mfi_suspend(device_t dev, const pmf_qual_t *q) 1001 { 1002 /* XXX to be implemented */ 1003 return false; 1004 } 1005 1006 static bool 1007 mfi_resume(device_t dev, const pmf_qual_t *q) 1008 { 1009 /* XXX to be implemented */ 1010 return false; 1011 } 1012 1013 int 1014 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop) 1015 { 1016 struct scsipi_adapter *adapt = &sc->sc_adapt; 1017 struct scsipi_channel *chan = &sc->sc_chan; 1018 uint32_t status, frames, max_sgl; 1019 int i; 1020 1021 DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc)); 1022 1023 sc->sc_ioptype = iop; 1024 1025 switch (iop) { 1026 case MFI_IOP_XSCALE: 1027 sc->sc_iop = &mfi_iop_xscale; 1028 break; 1029 case MFI_IOP_PPC: 1030 sc->sc_iop = &mfi_iop_ppc; 1031 break; 1032 case MFI_IOP_GEN2: 1033 sc->sc_iop = &mfi_iop_gen2; 1034 break; 1035 case MFI_IOP_SKINNY: 1036 sc->sc_iop = &mfi_iop_skinny; 1037 break; 1038 case MFI_IOP_TBOLT: 1039 sc->sc_iop = &mfi_iop_tbolt; 1040 break; 1041 default: 1042 panic("%s: unknown iop %d", DEVNAME(sc), iop); 1043 } 1044 1045 
if (mfi_transition_firmware(sc)) 1046 return 1; 1047 1048 TAILQ_INIT(&sc->sc_ccb_freeq); 1049 1050 status = mfi_fw_state(sc); 1051 sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK; 1052 max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16; 1053 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 1054 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1); 1055 sc->sc_sgl_size = sizeof(struct mfi_sg_ieee); 1056 } else if (sc->sc_64bit_dma) { 1057 sc->sc_max_sgl = uimin(max_sgl, (128 * 1024) / PAGE_SIZE + 1); 1058 sc->sc_sgl_size = sizeof(struct mfi_sg64); 1059 } else { 1060 sc->sc_max_sgl = max_sgl; 1061 sc->sc_sgl_size = sizeof(struct mfi_sg32); 1062 } 1063 DNPRINTF(MFI_D_MISC, "%s: max commands: %u, max sgl: %u\n", 1064 DEVNAME(sc), sc->sc_max_cmds, sc->sc_max_sgl); 1065 1066 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 1067 uint32_t tb_mem_size; 1068 /* for Alignment */ 1069 tb_mem_size = MEGASAS_THUNDERBOLT_MSG_ALLIGNMENT; 1070 1071 tb_mem_size += 1072 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1); 1073 sc->sc_reply_pool_size = 1074 ((sc->sc_max_cmds + 1 + 15) / 16) * 16; 1075 tb_mem_size += 1076 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; 1077 1078 /* this is for SGL's */ 1079 tb_mem_size += MEGASAS_MAX_SZ_CHAIN_FRAME * sc->sc_max_cmds; 1080 sc->sc_tbolt_reqmsgpool = mfi_allocmem(sc, tb_mem_size); 1081 if (sc->sc_tbolt_reqmsgpool == NULL) { 1082 aprint_error_dev(sc->sc_dev, 1083 "unable to allocate thunderbolt " 1084 "request message pool\n"); 1085 goto nopcq; 1086 } 1087 if (mfi_tbolt_init_desc_pool(sc)) { 1088 aprint_error_dev(sc->sc_dev, 1089 "Thunderbolt pool preparation error\n"); 1090 goto nopcq; 1091 } 1092 1093 /* 1094 * Allocate DMA memory mapping for MPI2 IOC Init descriptor, 1095 * we are taking it diffrent from what we have allocated for 1096 * Request and reply descriptors to avoid confusion later 1097 */ 1098 sc->sc_tbolt_ioc_init = mfi_allocmem(sc, 1099 sizeof(struct mpi2_ioc_init_request)); 1100 if (sc->sc_tbolt_ioc_init == NULL) { 1101 
aprint_error_dev(sc->sc_dev, 1102 "unable to allocate thunderbolt IOC init memory"); 1103 goto nopcq; 1104 } 1105 1106 sc->sc_tbolt_verbuf = mfi_allocmem(sc, 1107 MEGASAS_MAX_NAME*sizeof(bus_addr_t)); 1108 if (sc->sc_tbolt_verbuf == NULL) { 1109 aprint_error_dev(sc->sc_dev, 1110 "unable to allocate thunderbolt version buffer\n"); 1111 goto nopcq; 1112 } 1113 1114 } 1115 /* consumer/producer and reply queue memory */ 1116 sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) + 1117 sizeof(struct mfi_prod_cons)); 1118 if (sc->sc_pcq == NULL) { 1119 aprint_error_dev(sc->sc_dev, 1120 "unable to allocate reply queue memory\n"); 1121 goto nopcq; 1122 } 1123 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0, 1124 sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons), 1125 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1126 1127 /* frame memory */ 1128 frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) / 1129 MFI_FRAME_SIZE + 1; 1130 sc->sc_frames_size = frames * MFI_FRAME_SIZE; 1131 sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds); 1132 if (sc->sc_frames == NULL) { 1133 aprint_error_dev(sc->sc_dev, 1134 "unable to allocate frame memory\n"); 1135 goto noframe; 1136 } 1137 /* XXX hack, fix this */ 1138 if (MFIMEM_DVA(sc->sc_frames) & 0x3f) { 1139 aprint_error_dev(sc->sc_dev, 1140 "improper frame alignment (%#llx) FIXME\n", 1141 (long long int)MFIMEM_DVA(sc->sc_frames)); 1142 goto noframe; 1143 } 1144 1145 /* sense memory */ 1146 sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE); 1147 if (sc->sc_sense == NULL) { 1148 aprint_error_dev(sc->sc_dev, 1149 "unable to allocate sense memory\n"); 1150 goto nosense; 1151 } 1152 1153 /* now that we have all memory bits go initialize ccbs */ 1154 if (mfi_init_ccb(sc)) { 1155 aprint_error_dev(sc->sc_dev, "could not init ccb list\n"); 1156 goto noinit; 1157 } 1158 1159 /* kickstart firmware with all addresses and pointers */ 1160 if (sc->sc_ioptype == 
MFI_IOP_TBOLT) { 1161 if (mfi_tbolt_init_MFI_queue(sc)) { 1162 aprint_error_dev(sc->sc_dev, 1163 "could not initialize firmware\n"); 1164 goto noinit; 1165 } 1166 } else { 1167 if (mfi_initialize_firmware(sc)) { 1168 aprint_error_dev(sc->sc_dev, 1169 "could not initialize firmware\n"); 1170 goto noinit; 1171 } 1172 } 1173 sc->sc_running = true; 1174 1175 if (mfi_get_info(sc)) { 1176 aprint_error_dev(sc->sc_dev, 1177 "could not retrieve controller information\n"); 1178 goto noinit; 1179 } 1180 aprint_normal_dev(sc->sc_dev, 1181 "%s version %s\n", 1182 sc->sc_info.mci_product_name, 1183 sc->sc_info.mci_package_version); 1184 1185 1186 aprint_normal_dev(sc->sc_dev, "logical drives %d, %dMB RAM, ", 1187 sc->sc_info.mci_lds_present, 1188 sc->sc_info.mci_memory_size); 1189 sc->sc_bbuok = false; 1190 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) { 1191 struct mfi_bbu_status bbu_stat; 1192 int mfi_bbu_status = mfi_get_bbu(sc, &bbu_stat); 1193 aprint_normal("BBU type "); 1194 switch (bbu_stat.battery_type) { 1195 case MFI_BBU_TYPE_BBU: 1196 aprint_normal("BBU"); 1197 break; 1198 case MFI_BBU_TYPE_IBBU: 1199 aprint_normal("IBBU"); 1200 break; 1201 default: 1202 aprint_normal("unknown type %d", bbu_stat.battery_type); 1203 } 1204 aprint_normal(", status "); 1205 switch(mfi_bbu_status) { 1206 case MFI_BBU_GOOD: 1207 aprint_normal("good\n"); 1208 sc->sc_bbuok = true; 1209 break; 1210 case MFI_BBU_BAD: 1211 aprint_normal("bad\n"); 1212 break; 1213 case MFI_BBU_UNKNOWN: 1214 aprint_normal("unknown\n"); 1215 break; 1216 default: 1217 panic("mfi_bbu_status"); 1218 } 1219 } else { 1220 aprint_normal("BBU not present\n"); 1221 } 1222 1223 sc->sc_ld_cnt = sc->sc_info.mci_lds_present; 1224 sc->sc_max_ld = sc->sc_ld_cnt; 1225 for (i = 0; i < sc->sc_ld_cnt; i++) 1226 sc->sc_ld[i].ld_present = 1; 1227 1228 memset(adapt, 0, sizeof(*adapt)); 1229 adapt->adapt_dev = sc->sc_dev; 1230 adapt->adapt_nchannels = 1; 1231 /* keep a few commands for management */ 1232 if (sc->sc_max_cmds > 4) 
1233 adapt->adapt_openings = sc->sc_max_cmds - 4; 1234 else 1235 adapt->adapt_openings = sc->sc_max_cmds; 1236 adapt->adapt_max_periph = adapt->adapt_openings; 1237 adapt->adapt_request = mfi_scsipi_request; 1238 adapt->adapt_minphys = mfiminphys; 1239 1240 memset(chan, 0, sizeof(*chan)); 1241 chan->chan_adapter = adapt; 1242 chan->chan_bustype = &scsi_sas_bustype; 1243 chan->chan_channel = 0; 1244 chan->chan_flags = 0; 1245 chan->chan_nluns = 8; 1246 chan->chan_ntargets = MFI_MAX_LD; 1247 chan->chan_id = MFI_MAX_LD; 1248 1249 mfi_rescan(sc->sc_dev, "scsi", NULL); 1250 1251 /* enable interrupts */ 1252 mfi_intr_enable(sc); 1253 1254 #if NBIO > 0 1255 if (bio_register(sc->sc_dev, mfi_ioctl) != 0) 1256 panic("%s: controller registration failed", DEVNAME(sc)); 1257 if (mfi_create_sensors(sc) != 0) 1258 aprint_error_dev(sc->sc_dev, "unable to create sensors\n"); 1259 #endif /* NBIO > 0 */ 1260 if (!pmf_device_register1(sc->sc_dev, mfi_suspend, mfi_resume, 1261 mfi_shutdown)) { 1262 aprint_error_dev(sc->sc_dev, 1263 "couldn't establish power handler\n"); 1264 } 1265 1266 return 0; 1267 noinit: 1268 mfi_freemem(sc, &sc->sc_sense); 1269 nosense: 1270 mfi_freemem(sc, &sc->sc_frames); 1271 noframe: 1272 mfi_freemem(sc, &sc->sc_pcq); 1273 nopcq: 1274 if (sc->sc_ioptype == MFI_IOP_TBOLT) { 1275 if (sc->sc_tbolt_reqmsgpool) 1276 mfi_freemem(sc, &sc->sc_tbolt_reqmsgpool); 1277 if (sc->sc_tbolt_verbuf) 1278 mfi_freemem(sc, &sc->sc_tbolt_verbuf); 1279 } 1280 return 1; 1281 } 1282 1283 static int 1284 mfi_poll(struct mfi_ccb *ccb) 1285 { 1286 struct mfi_softc *sc = ccb->ccb_sc; 1287 struct mfi_frame_header *hdr; 1288 int to = 0; 1289 int rv = 0; 1290 1291 DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc)); 1292 1293 hdr = &ccb->ccb_frame->mfr_header; 1294 hdr->mfh_cmd_status = 0xff; 1295 if (!sc->sc_MFA_enabled) 1296 hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 1297 1298 /* no callback, caller is supposed to do the cleanup */ 1299 ccb->ccb_done = NULL; 1300 1301 
	mfi_post(sc, ccb);
	if (sc->sc_MFA_enabled) {
		/*
		 * depending on the command type, result may be posted
		 * to *hdr, or not. In addition it seems there's
		 * no way to avoid posting the SMID to the reply queue.
		 * So poll using the interrupt routine.
		 */
		while (ccb->ccb_state != MFI_CCB_DONE) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			mfi_tbolt_intrh(sc);
		}
	} else {
		/* watch the status byte in the frame itself */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
		    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);

		while (hdr->mfh_cmd_status == 0xff) {
			delay(1000);
			if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
				rv = 1;
				break;
			}
			/* re-sync so we observe the firmware's update */
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size, BUS_DMASYNC_POSTREAD);
		}
	}
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
	    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
	    sc->sc_frames_size, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* sync and unload the data map; the caller owns the rest */
	if (ccb->ccb_data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "timeout on ccb %d\n",
		    hdr->mfh_context);
		ccb->ccb_flags |= MFI_CCB_F_ERR;
		return 1;
	}

	return 0;
}

/*
 * Interrupt handler for the non-Thunderbolt reply path: walk the
 * producer/consumer reply ring, complete each posted ccb via its
 * ccb_done callback, and write the consumer index back for the firmware.
 * Returns nonzero if the interrupt was ours.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc *sc = arg;
	struct mfi_prod_cons *pcq;
	struct mfi_ccb *ccb;
	uint32_t producer, consumer, ctx;
	int claimed = 0;

	if (!mfi_my_intr(sc))
		return 0;

	pcq = MFIMEM_KVA(sc->sc_pcq);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#lx %#lx\n", DEVNAME(sc),
	    (u_long)sc, (u_long)pcq);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = pcq->mpc_producer;
	consumer = pcq->mpc_consumer;

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		/* each ring slot holds the context (ccb index) or a marker */
		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			aprint_error_dev(sc->sc_dev,
			    "invalid context, p: %d c: %d\n",
			    producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
			    ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames),
			    sc->sc_frames_size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			ccb->ccb_done(ccb);

			claimed = 1;
		}
		consumer++;
		/* ring has sc_max_cmds + 1 slots */
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	pcq->mpc_consumer = consumer;
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq), 0,
	    sizeof(uint32_t) * sc->sc_max_cmds + sizeof(struct mfi_prod_cons),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return claimed;
}

/*
 * Build an MFI LD_READ/LD_WRITE I/O frame for a block read or write on
 * a logical drive.  Returns 0 on success, 1 if the frame could not be
 * set up (no data, or SGL construction failed).
 */
static int
mfi_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, uint64_t blockno,
    uint32_t blockcnt)
{
	struct scsipi_periph *periph = xs->xs_periph;
	struct mfi_io_frame *io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld_io: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	if (!xs->data)
		return 1;

	io = &ccb->ccb_frame->mfr_io;
	if (xs->xs_control & XS_CTL_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = periph->periph_target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	io->mif_header.mfh_data_len= blockcnt;
	io->mif_lba_hi = (blockno >> 32);
	io->mif_lba_lo = (blockno & 0xffffffff);
	io->mif_sense_addr_lo = htole32(ccb->ccb_psense);
	io->mif_sense_addr_hi = 0;

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return 1;

	return 0;
}

/*
 * Completion callback for logical-drive I/O frames: forward the firmware
 * and SCSI status from the frame header to the common completion path.
 */
static void
mfi_scsi_ld_done(struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header;
	mfi_scsi_xs_done(ccb, hdr->mfh_cmd_status, hdr->mfh_scsi_status);
}

/*
 * Common scsipi completion: sync/unload the data DMA map, translate the
 * MFI status (copying sense data out of the sense area on a SCSI check
 * condition), release the ccb and call scsipi_done().
 */
static void
mfi_scsi_xs_done(struct mfi_ccb *ccb, int status, int scsi_status)
{
	struct scsipi_xfer *xs = ccb->ccb_xs;
	struct mfi_softc *sc = ccb->ccb_sc;

	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#lx %#lx\n",
	    DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame);

	if (xs->data != NULL) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done sync\n",
		    DEVNAME(sc));
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap);
	}

	if (status != MFI_STAT_OK) {
		xs->error = XS_DRIVER_STUFFUP;
		DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done stuffup %#x\n",
		    DEVNAME(sc), status);

		if (scsi_status != 0) {
			/* pull the sense bytes the firmware wrote for us */
			bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense),
			    ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense),
			    MFI_SENSE_SIZE, BUS_DMASYNC_POSTREAD);
			DNPRINTF(MFI_D_INTR,
			    "%s: mfi_scsi_xs_done sense %#x %lx %lx\n",
			    DEVNAME(sc), scsi_status,
			    (u_long)&xs->sense, (u_long)ccb->ccb_sense);
			memset(&xs->sense, 0, sizeof(xs->sense));
			memcpy(&xs->sense, ccb->ccb_sense,
			    sizeof(struct scsi_sense_data));
			xs->error = XS_SENSE;
		}
	} else {
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
	}

	mfi_put_ccb(ccb);
	scsipi_done(xs);
}

/*
 * Build an MFI pass-through frame carrying the raw CDB of a scsipi
 * transfer to a logical drive.  Returns 0 on success, 1 if the SGL
 * could not be created.
 */
static int
mfi_scsi_ld(struct mfi_ccb *ccb, struct scsipi_xfer *xs)
{
	struct mfi_pass_frame *pf;
	struct scsipi_periph *periph = xs->xs_periph;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    device_xname(periph->periph_channel->chan_adapter->adapt_dev),
	    periph->periph_target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = periph->periph_target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len= xs->datalen; /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr_hi = 0;
	pf->mpf_sense_addr_lo = htole32(ccb->ccb_psense);

	/* copy the CDB, zero-padded to the full 16-byte field */
	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, &xs->cmdstore, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_ld_done;
	ccb->ccb_xs = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	if (xs->xs_control & (XS_CTL_DATA_IN | XS_CTL_DATA_OUT))
		ccb->ccb_direction = (xs->xs_control & XS_CTL_DATA_IN) ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(ccb, (xs->xs_control & XS_CTL_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return 1;
	}

	return 0;
}

/*
 * scsipi request entry point: dispatch reads/writes to the per-IOP I/O
 * frame builder, short-circuit cache flushes when a good BBU is present,
 * and pass everything else through to the firmware.
 */
static void
mfi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct mfi_softc *sc = device_private(adapt->adapt_dev);
	struct mfi_ccb *ccb;
	struct scsi_rw_6 *rw;
	struct scsipi_rw_10 *rwb;
	struct scsipi_rw_12 *rw12;
	struct scsipi_rw_16 *rw16;
	uint64_t blockno;
	uint32_t blockcnt;
	uint8_t target;
	uint8_t mbox[MFI_MBOX_SIZE];
	int s;

	switch (req) {
	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;
	case ADAPTER_REQ_SET_XFER_MODE:
	{
		struct scsipi_xfer_mode *xm = arg;
		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(&sc->sc_chan, ASYNC_EVENT_XFER_MODE, xm);
		return;
	}
	case ADAPTER_REQ_RUN_XFER:
		break;
	}

	xs = arg;

	periph = xs->xs_periph;
	target = periph->periph_target;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request req %d opcode: %#x "
	    "target %d lun %d\n", DEVNAME(sc), req, xs->cmd->opcode,
	    periph->periph_target, periph->periph_lun);

	s = splbio();
	/* only lun 0 of configured logical drives is addressable */
	if (target >= MFI_MAX_LD || !sc->sc_ld[target].ld_present ||
	    periph->periph_lun != 0) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		xs->error = XS_SELTIMEOUT;
		scsipi_done(xs);
		splx(s);
		return;
	}
	if ((xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_10 ||
	    xs->cmd->opcode == SCSI_SYNCHRONIZE_CACHE_16) && sc->sc_bbuok) {
		/* the cache is stable storage, don't flush */
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		scsipi_done(xs);
		splx(s);
		return;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		DNPRINTF(MFI_D_CMD, "%s: mfi_scsipi_request no ccb\n", DEVNAME(sc));
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_16:
	case WRITE_16:
		rw16 = (struct scsipi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

	case READ_12:
	case WRITE_12:
		rw12 = (struct scsipi_rw_12 *)xs->cmd;
		blockno = _4btol(rw12->addr);
		blockcnt = _4btol(rw12->length);
		if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) {
			goto stuffup;
		}
		break;

1663 case READ_10: 1664 case WRITE_10: 1665 rwb = (struct scsipi_rw_10 *)xs->cmd; 1666 blockno = _4btol(rwb->addr); 1667 blockcnt = _2btol(rwb->length); 1668 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { 1669 goto stuffup; 1670 } 1671 break; 1672 1673 case SCSI_READ_6_COMMAND: 1674 case SCSI_WRITE_6_COMMAND: 1675 rw = (struct scsi_rw_6 *)xs->cmd; 1676 blockno = _3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff); 1677 blockcnt = rw->length ? rw->length : 0x100; 1678 if (sc->sc_iop->mio_ld_io(ccb, xs, blockno, blockcnt)) { 1679 goto stuffup; 1680 } 1681 break; 1682 1683 case SCSI_SYNCHRONIZE_CACHE_10: 1684 case SCSI_SYNCHRONIZE_CACHE_16: 1685 mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; 1686 if (mfi_mgmt(ccb, xs, 1687 MR_DCMD_CTRL_CACHE_FLUSH, MFI_DATA_NONE, 0, NULL, mbox)) { 1688 goto stuffup; 1689 } 1690 break; 1691 1692 /* hand it of to the firmware and let it deal with it */ 1693 case SCSI_TEST_UNIT_READY: 1694 /* save off sd? after autoconf */ 1695 if (!cold) /* XXX bogus */ 1696 strlcpy(sc->sc_ld[target].ld_dev, device_xname(sc->sc_dev), 1697 sizeof(sc->sc_ld[target].ld_dev)); 1698 /* FALLTHROUGH */ 1699 1700 default: 1701 if (mfi_scsi_ld(ccb, xs)) { 1702 goto stuffup; 1703 } 1704 break; 1705 } 1706 1707 DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target); 1708 1709 if (xs->xs_control & XS_CTL_POLL) { 1710 if (mfi_poll(ccb)) { 1711 /* XXX check for sense in ccb->ccb_sense? 
*/ 1712 aprint_error_dev(sc->sc_dev, 1713 "mfi_scsipi_request poll failed\n"); 1714 memset(&xs->sense, 0, sizeof(xs->sense)); 1715 xs->sense.scsi_sense.response_code = 1716 SSD_RCODE_VALID | SSD_RCODE_CURRENT; 1717 xs->sense.scsi_sense.flags = SKEY_ILLEGAL_REQUEST; 1718 xs->sense.scsi_sense.asc = 0x20; /* invalid opcode */ 1719 xs->error = XS_SENSE; 1720 xs->status = SCSI_CHECK; 1721 } else { 1722 DNPRINTF(MFI_D_DMA, 1723 "%s: mfi_scsipi_request poll complete %d\n", 1724 DEVNAME(sc), ccb->ccb_dmamap->dm_nsegs); 1725 xs->error = XS_NOERROR; 1726 xs->status = SCSI_OK; 1727 xs->resid = 0; 1728 } 1729 mfi_put_ccb(ccb); 1730 scsipi_done(xs); 1731 splx(s); 1732 return; 1733 } 1734 1735 mfi_post(sc, ccb); 1736 1737 DNPRINTF(MFI_D_DMA, "%s: mfi_scsipi_request queued %d\n", DEVNAME(sc), 1738 ccb->ccb_dmamap->dm_nsegs); 1739 1740 splx(s); 1741 return; 1742 1743 stuffup: 1744 mfi_put_ccb(ccb); 1745 xs->error = XS_DRIVER_STUFFUP; 1746 scsipi_done(xs); 1747 splx(s); 1748 } 1749 1750 static int 1751 mfi_create_sgl(struct mfi_ccb *ccb, int flags) 1752 { 1753 struct mfi_softc *sc = ccb->ccb_sc; 1754 struct mfi_frame_header *hdr; 1755 bus_dma_segment_t *sgd; 1756 union mfi_sgl *sgl; 1757 int error, i; 1758 1759 DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#lx\n", DEVNAME(sc), 1760 (u_long)ccb->ccb_data); 1761 1762 if (!ccb->ccb_data) 1763 return 1; 1764 1765 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p()); 1766 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap, 1767 ccb->ccb_data, ccb->ccb_len, NULL, flags); 1768 if (error) { 1769 if (error == EFBIG) { 1770 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n", 1771 sc->sc_max_sgl); 1772 } else { 1773 aprint_error_dev(sc->sc_dev, 1774 "error %d loading dma map\n", error); 1775 } 1776 return 1; 1777 } 1778 1779 hdr = &ccb->ccb_frame->mfr_header; 1780 sgl = ccb->ccb_sgl; 1781 sgd = ccb->ccb_dmamap->dm_segs; 1782 for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) { 1783 if (sc->sc_ioptype == MFI_IOP_TBOLT && 1784 (hdr->mfh_cmd == 
MFI_CMD_PD_SCSI_IO || 1785 hdr->mfh_cmd == MFI_CMD_LD_READ || 1786 hdr->mfh_cmd == MFI_CMD_LD_WRITE)) { 1787 sgl->sg_ieee[i].addr = htole64(sgd[i].ds_addr); 1788 sgl->sg_ieee[i].len = htole32(sgd[i].ds_len); 1789 sgl->sg_ieee[i].flags = 0; 1790 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#" 1791 PRIx32 "\n", 1792 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len); 1793 hdr->mfh_flags |= MFI_FRAME_IEEE_SGL | MFI_FRAME_SGL64; 1794 } else if (sc->sc_64bit_dma) { 1795 sgl->sg64[i].addr = htole64(sgd[i].ds_addr); 1796 sgl->sg64[i].len = htole32(sgd[i].ds_len); 1797 DNPRINTF(MFI_D_DMA, "%s: addr: %#" PRIx64 " len: %#" 1798 PRIx32 "\n", 1799 DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len); 1800 hdr->mfh_flags |= MFI_FRAME_SGL64; 1801 } else { 1802 sgl->sg32[i].addr = htole32(sgd[i].ds_addr); 1803 sgl->sg32[i].len = htole32(sgd[i].ds_len); 1804 DNPRINTF(MFI_D_DMA, "%s: addr: %#x len: %#x\n", 1805 DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len); 1806 hdr->mfh_flags |= MFI_FRAME_SGL32; 1807 } 1808 } 1809 1810 if (ccb->ccb_direction == MFI_DATA_IN) { 1811 hdr->mfh_flags |= MFI_FRAME_DIR_READ; 1812 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, 1813 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1814 } else { 1815 hdr->mfh_flags |= MFI_FRAME_DIR_WRITE; 1816 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, 1817 ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE); 1818 } 1819 1820 hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs; 1821 ccb->ccb_frame_size += sc->sc_sgl_size * ccb->ccb_dmamap->dm_nsegs; 1822 ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE; 1823 1824 DNPRINTF(MFI_D_DMA, "%s: sg_count: %d frame_size: %d frames_size: %d" 1825 " dm_nsegs: %d extra_frames: %d\n", 1826 DEVNAME(sc), 1827 hdr->mfh_sg_count, 1828 ccb->ccb_frame_size, 1829 sc->sc_frames_size, 1830 ccb->ccb_dmamap->dm_nsegs, 1831 ccb->ccb_extra_frames); 1832 1833 return 0; 1834 } 1835 1836 static int 1837 mfi_mgmt_internal(struct mfi_softc *sc, uint32_t opc, uint32_t 
dir, 1838 uint32_t len, void *buf, uint8_t *mbox, bool poll) 1839 { 1840 struct mfi_ccb *ccb; 1841 int rv = 1; 1842 1843 if ((ccb = mfi_get_ccb(sc)) == NULL) 1844 return rv; 1845 rv = mfi_mgmt(ccb, NULL, opc, dir, len, buf, mbox); 1846 if (rv) 1847 return rv; 1848 1849 if (poll) { 1850 rv = 1; 1851 if (mfi_poll(ccb)) 1852 goto done; 1853 } else { 1854 mfi_post(sc, ccb); 1855 1856 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt_internal sleeping\n", 1857 DEVNAME(sc)); 1858 while (ccb->ccb_state != MFI_CCB_DONE) 1859 tsleep(ccb, PRIBIO, "mfi_mgmt", 0); 1860 1861 if (ccb->ccb_flags & MFI_CCB_F_ERR) 1862 goto done; 1863 } 1864 rv = 0; 1865 1866 done: 1867 mfi_put_ccb(ccb); 1868 return rv; 1869 } 1870 1871 static int 1872 mfi_mgmt(struct mfi_ccb *ccb, struct scsipi_xfer *xs, 1873 uint32_t opc, uint32_t dir, uint32_t len, void *buf, uint8_t *mbox) 1874 { 1875 struct mfi_dcmd_frame *dcmd; 1876 1877 DNPRINTF(MFI_D_MISC, "%s: mfi_mgmt %#x\n", DEVNAME(ccb->ccb_sc), opc); 1878 1879 dcmd = &ccb->ccb_frame->mfr_dcmd; 1880 memset(dcmd->mdf_mbox.b, 0, MFI_MBOX_SIZE); 1881 dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD; 1882 dcmd->mdf_header.mfh_timeout = 0; 1883 1884 dcmd->mdf_opcode = opc; 1885 dcmd->mdf_header.mfh_data_len = 0; 1886 ccb->ccb_direction = dir; 1887 ccb->ccb_xs = xs; 1888 ccb->ccb_done = mfi_mgmt_done; 1889 1890 ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE; 1891 1892 /* handle special opcodes */ 1893 if (mbox) 1894 memcpy(dcmd->mdf_mbox.b, mbox, MFI_MBOX_SIZE); 1895 1896 if (dir != MFI_DATA_NONE) { 1897 dcmd->mdf_header.mfh_data_len = len; 1898 ccb->ccb_data = buf; 1899 ccb->ccb_len = len; 1900 ccb->ccb_sgl = &dcmd->mdf_sgl; 1901 1902 if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) 1903 return 1; 1904 } 1905 return 0; 1906 } 1907 1908 static void 1909 mfi_mgmt_done(struct mfi_ccb *ccb) 1910 { 1911 struct scsipi_xfer *xs = ccb->ccb_xs; 1912 struct mfi_softc *sc = ccb->ccb_sc; 1913 struct mfi_frame_header *hdr = &ccb->ccb_frame->mfr_header; 1914 1915 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done %#lx 
%#lx\n", 1916 DEVNAME(sc), (u_long)ccb, (u_long)ccb->ccb_frame); 1917 1918 if (ccb->ccb_data != NULL) { 1919 DNPRINTF(MFI_D_INTR, "%s: mfi_mgmt_done sync\n", 1920 DEVNAME(sc)); 1921 bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0, 1922 ccb->ccb_dmamap->dm_mapsize, 1923 (ccb->ccb_direction & MFI_DATA_IN) ? 1924 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1925 1926 bus_dmamap_unload(sc->sc_datadmat, ccb->ccb_dmamap); 1927 } 1928 1929 if (hdr->mfh_cmd_status != MFI_STAT_OK) 1930 ccb->ccb_flags |= MFI_CCB_F_ERR; 1931 1932 ccb->ccb_state = MFI_CCB_DONE; 1933 if (xs) { 1934 if (hdr->mfh_cmd_status != MFI_STAT_OK) { 1935 xs->error = XS_DRIVER_STUFFUP; 1936 } else { 1937 xs->error = XS_NOERROR; 1938 xs->status = SCSI_OK; 1939 xs->resid = 0; 1940 } 1941 mfi_put_ccb(ccb); 1942 scsipi_done(xs); 1943 } else 1944 wakeup(ccb); 1945 } 1946 1947 #if NBIO > 0 1948 int 1949 mfi_ioctl(device_t dev, u_long cmd, void *addr) 1950 { 1951 struct mfi_softc *sc = device_private(dev); 1952 int error = 0; 1953 int s; 1954 1955 KERNEL_LOCK(1, curlwp); 1956 s = splbio(); 1957 1958 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc)); 1959 1960 switch (cmd) { 1961 case BIOCINQ: 1962 DNPRINTF(MFI_D_IOCTL, "inq\n"); 1963 error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr); 1964 break; 1965 1966 case BIOCVOL: 1967 DNPRINTF(MFI_D_IOCTL, "vol\n"); 1968 error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr); 1969 break; 1970 1971 case BIOCDISK: 1972 DNPRINTF(MFI_D_IOCTL, "disk\n"); 1973 error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr); 1974 break; 1975 1976 case BIOCALARM: 1977 DNPRINTF(MFI_D_IOCTL, "alarm\n"); 1978 error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr); 1979 break; 1980 1981 case BIOCBLINK: 1982 DNPRINTF(MFI_D_IOCTL, "blink\n"); 1983 error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr); 1984 break; 1985 1986 case BIOCSETSTATE: 1987 DNPRINTF(MFI_D_IOCTL, "setstate\n"); 1988 error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr); 1989 break; 1990 1991 default: 1992 
DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n"); 1993 error = EINVAL; 1994 } 1995 splx(s); 1996 KERNEL_UNLOCK_ONE(curlwp); 1997 1998 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl return %x\n", DEVNAME(sc), error); 1999 return error; 2000 } 2001 2002 static int 2003 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi) 2004 { 2005 struct mfi_conf *cfg; 2006 int rv = EINVAL; 2007 2008 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc)); 2009 2010 if (mfi_get_info(sc)) { 2011 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq failed\n", 2012 DEVNAME(sc)); 2013 return EIO; 2014 } 2015 2016 /* get figures */ 2017 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); 2018 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, 2019 sizeof *cfg, cfg, NULL, false)) 2020 goto freeme; 2021 2022 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 2023 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs; 2024 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present; 2025 2026 rv = 0; 2027 freeme: 2028 free(cfg, M_DEVBUF); 2029 return rv; 2030 } 2031 2032 static int 2033 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv) 2034 { 2035 int i, per, rv = EINVAL; 2036 uint8_t mbox[MFI_MBOX_SIZE]; 2037 2038 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n", 2039 DEVNAME(sc), bv->bv_volid); 2040 2041 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN, 2042 sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) 2043 goto done; 2044 2045 i = bv->bv_volid; 2046 mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target; 2047 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol target %#x\n", 2048 DEVNAME(sc), mbox[0]); 2049 2050 if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, 2051 sizeof(sc->sc_ld_details), &sc->sc_ld_details, mbox, false)) 2052 goto done; 2053 2054 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) { 2055 /* go do hotspares */ 2056 rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv); 2057 goto done; 2058 } 2059 2060 strlcpy(bv->bv_dev, sc->sc_ld[i].ld_dev, sizeof(bv->bv_dev)); 2061 2062 
switch(sc->sc_ld_list.mll_list[i].mll_state) { 2063 case MFI_LD_OFFLINE: 2064 bv->bv_status = BIOC_SVOFFLINE; 2065 break; 2066 2067 case MFI_LD_PART_DEGRADED: 2068 case MFI_LD_DEGRADED: 2069 bv->bv_status = BIOC_SVDEGRADED; 2070 break; 2071 2072 case MFI_LD_ONLINE: 2073 bv->bv_status = BIOC_SVONLINE; 2074 break; 2075 2076 default: 2077 bv->bv_status = BIOC_SVINVALID; 2078 DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n", 2079 DEVNAME(sc), 2080 sc->sc_ld_list.mll_list[i].mll_state); 2081 } 2082 2083 /* additional status can modify MFI status */ 2084 switch (sc->sc_ld_details.mld_progress.mlp_in_prog) { 2085 case MFI_LD_PROG_CC: 2086 case MFI_LD_PROG_BGI: 2087 bv->bv_status = BIOC_SVSCRUB; 2088 per = (int)sc->sc_ld_details.mld_progress.mlp_cc.mp_progress; 2089 bv->bv_percent = (per * 100) / 0xffff; 2090 bv->bv_seconds = 2091 sc->sc_ld_details.mld_progress.mlp_cc.mp_elapsed_seconds; 2092 break; 2093 2094 case MFI_LD_PROG_FGI: 2095 case MFI_LD_PROG_RECONSTRUCT: 2096 /* nothing yet */ 2097 break; 2098 } 2099 2100 /* 2101 * The RAID levels are determined per the SNIA DDF spec, this is only 2102 * a subset that is valid for the MFI contrller. 
2103 */ 2104 bv->bv_level = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_pri_raid; 2105 if (sc->sc_ld_details.mld_cfg.mlc_parm.mpa_sec_raid == 2106 MFI_DDF_SRL_SPANNED) 2107 bv->bv_level *= 10; 2108 2109 bv->bv_nodisk = sc->sc_ld_details.mld_cfg.mlc_parm.mpa_no_drv_per_span * 2110 sc->sc_ld_details.mld_cfg.mlc_parm.mpa_span_depth; 2111 2112 bv->bv_size = sc->sc_ld_details.mld_size * 512; /* bytes per block */ 2113 2114 rv = 0; 2115 done: 2116 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol done %x\n", 2117 DEVNAME(sc), rv); 2118 return rv; 2119 } 2120 2121 static int 2122 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd) 2123 { 2124 struct mfi_conf *cfg; 2125 struct mfi_array *ar; 2126 struct mfi_ld_cfg *ld; 2127 struct mfi_pd_details *pd; 2128 struct scsipi_inquiry_data *inqbuf; 2129 char vend[8+16+4+1]; 2130 int i, rv = EINVAL; 2131 int arr, vol, disk; 2132 uint32_t size; 2133 uint8_t mbox[MFI_MBOX_SIZE]; 2134 2135 DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n", 2136 DEVNAME(sc), bd->bd_diskid); 2137 2138 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO); 2139 2140 /* send single element command to retrieve size for full structure */ 2141 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); 2142 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, 2143 sizeof *cfg, cfg, NULL, false)) 2144 goto freeme; 2145 2146 size = cfg->mfc_size; 2147 free(cfg, M_DEVBUF); 2148 2149 /* memory for read config */ 2150 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); 2151 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, 2152 size, cfg, NULL, false)) 2153 goto freeme; 2154 2155 ar = cfg->mfc_array; 2156 2157 /* calculate offset to ld structure */ 2158 ld = (struct mfi_ld_cfg *)( 2159 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 2160 cfg->mfc_array_size * cfg->mfc_no_array); 2161 2162 vol = bd->bd_volid; 2163 2164 if (vol >= cfg->mfc_no_ld) { 2165 /* do hotspares */ 2166 rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd); 2167 goto freeme; 2168 } 2169 2170 /* find 
corresponding array for ld */ 2171 for (i = 0, arr = 0; i < vol; i++) 2172 arr += ld[i].mlc_parm.mpa_span_depth; 2173 2174 /* offset disk into pd list */ 2175 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span; 2176 2177 /* offset array index into the next spans */ 2178 arr += bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span; 2179 2180 bd->bd_target = ar[arr].pd[disk].mar_enc_slot; 2181 switch (ar[arr].pd[disk].mar_pd_state){ 2182 case MFI_PD_UNCONFIG_GOOD: 2183 bd->bd_status = BIOC_SDUNUSED; 2184 break; 2185 2186 case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */ 2187 bd->bd_status = BIOC_SDHOTSPARE; 2188 break; 2189 2190 case MFI_PD_OFFLINE: 2191 bd->bd_status = BIOC_SDOFFLINE; 2192 break; 2193 2194 case MFI_PD_FAILED: 2195 bd->bd_status = BIOC_SDFAILED; 2196 break; 2197 2198 case MFI_PD_REBUILD: 2199 bd->bd_status = BIOC_SDREBUILD; 2200 break; 2201 2202 case MFI_PD_ONLINE: 2203 bd->bd_status = BIOC_SDONLINE; 2204 break; 2205 2206 case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */ 2207 default: 2208 bd->bd_status = BIOC_SDINVALID; 2209 break; 2210 2211 } 2212 2213 /* get the remaining fields */ 2214 *((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id; 2215 memset(pd, 0, sizeof(*pd)); 2216 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, 2217 sizeof *pd, pd, mbox, false)) 2218 goto freeme; 2219 2220 bd->bd_size = pd->mpd_size * 512; /* bytes per block */ 2221 2222 /* if pd->mpd_enc_idx is 0 then it is not in an enclosure */ 2223 bd->bd_channel = pd->mpd_enc_idx; 2224 2225 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data; 2226 memcpy(vend, inqbuf->vendor, sizeof vend - 1); 2227 vend[sizeof vend - 1] = '\0'; 2228 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor)); 2229 2230 /* XXX find a way to retrieve serial nr from drive */ 2231 /* XXX find a way to get bd_procdev */ 2232 2233 rv = 0; 2234 freeme: 2235 free(pd, M_DEVBUF); 2236 free(cfg, M_DEVBUF); 2237 2238 return rv; 2239 } 2240 2241 static int 2242 
/*
 * mfi_ioctl_alarm: BIOCALARM handler -- drive the controller's audible
 * alarm ("speaker") via speaker DCMDs.  Only BIOC_GASTATUS transfers
 * data back (one byte of speaker state).  Returns 0 or EINVAL.
 */
mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
{
	uint32_t opc, dir = MFI_DATA_NONE;
	int rv = 0;
	int8_t ret;	/* speaker state returned by MR_DCMD_SPEAKER_GET */

	/* map the bio opcode onto the corresponding firmware DCMD */
	switch(ba->ba_opcode) {
	case BIOC_SADISABLE:
		opc = MR_DCMD_SPEAKER_DISABLE;
		break;

	case BIOC_SAENABLE:
		opc = MR_DCMD_SPEAKER_ENABLE;
		break;

	case BIOC_SASILENCE:
		opc = MR_DCMD_SPEAKER_SILENCE;
		break;

	case BIOC_GASTATUS:
		opc = MR_DCMD_SPEAKER_GET;
		dir = MFI_DATA_IN;	/* the only opcode that reads data */
		break;

	case BIOC_SATEST:
		opc = MR_DCMD_SPEAKER_TEST;
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
		return EINVAL;
	}

	if (mfi_mgmt_internal(sc, opc, dir, sizeof(ret), &ret, NULL, false))
		rv = EINVAL;
	else
		if (ba->ba_opcode == BIOC_GASTATUS)
			ba->ba_status = ret;
		else
			ba->ba_status = 0;

	return rv;
}

/*
 * mfi_ioctl_blink: BIOCBLINK handler -- (un)blink the locate LED of the
 * physical disk addressed by bb->bb_channel (enclosure index) and
 * bb->bb_target (enclosure slot).  The disk is looked up in the
 * firmware's physical disk list and addressed by pd id through the
 * first two mailbox bytes.  Returns 0 or EINVAL.
 */
static int
mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
{
	int i, found, rv = EINVAL;
	uint8_t mbox[MFI_MBOX_SIZE];
	uint32_t cmd;
	struct mfi_pd_list *pd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return EINVAL;

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL, false))
		goto done;

	/* locate the pd matching the requested enclosure/slot */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	/* pd id travels in the first two mailbox bytes */
	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfi_mgmt_internal(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}

/*
 * mfi_ioctl_setstate: BIOCSETSTATE handler -- set a physical disk
 * online, offline, or hotspare.  The disk is located via the firmware
 * pd list exactly as in mfi_ioctl_blink(); the pd id goes in the first
 * two mailbox bytes and the requested MFI state in mbox[2].
 * Returns 0 or EINVAL.
 */
static int
mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_list *pd;
	int i, found, rv = EINVAL;
	uint8_t mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(MFI_PD_LIST_SIZE, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    MFI_PD_LIST_SIZE, pd, NULL, false))
		goto done;

	/* locate the pd matching the requested enclosure/slot */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	/* pd id in the first two mailbox bytes, new state in mbox[2] */
	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox[2] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox[2] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox[2] = MFI_PD_HOTSPARE;
		break;
/*
	case BIOC_SSREBUILD:
		break;
*/
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	if (mfi_mgmt_internal(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE,
	    0, NULL, mbox, false))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF);
	return rv;
}

static int
mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs) 2413 { 2414 struct mfi_conf *cfg; 2415 struct mfi_hotspare *hs; 2416 struct mfi_pd_details *pd; 2417 struct bioc_disk *sdhs; 2418 struct bioc_vol *vdhs; 2419 struct scsipi_inquiry_data *inqbuf; 2420 char vend[8+16+4+1]; 2421 int i, rv = EINVAL; 2422 uint32_t size; 2423 uint8_t mbox[MFI_MBOX_SIZE]; 2424 2425 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid); 2426 2427 if (!bio_hs) 2428 return EINVAL; 2429 2430 pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK | M_ZERO); 2431 2432 /* send single element command to retrieve size for full structure */ 2433 cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK); 2434 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, 2435 sizeof *cfg, cfg, NULL, false)) 2436 goto freeme; 2437 2438 size = cfg->mfc_size; 2439 free(cfg, M_DEVBUF); 2440 2441 /* memory for read config */ 2442 cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO); 2443 if (mfi_mgmt_internal(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, 2444 size, cfg, NULL, false)) 2445 goto freeme; 2446 2447 /* calculate offset to hs structure */ 2448 hs = (struct mfi_hotspare *)( 2449 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) + 2450 cfg->mfc_array_size * cfg->mfc_no_array + 2451 cfg->mfc_ld_size * cfg->mfc_no_ld); 2452 2453 if (volid < cfg->mfc_no_ld) 2454 goto freeme; /* not a hotspare */ 2455 2456 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs)) 2457 goto freeme; /* not a hotspare */ 2458 2459 /* offset into hotspare structure */ 2460 i = volid - cfg->mfc_no_ld; 2461 2462 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d " 2463 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld, 2464 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id); 2465 2466 /* get pd fields */ 2467 memset(mbox, 0, sizeof mbox); 2468 *((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id; 2469 if (mfi_mgmt_internal(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN, 2470 sizeof *pd, pd, mbox, false)) { 2471 DNPRINTF(MFI_D_IOCTL, "%s: 
mfi_vol_hs illegal PD\n", 2472 DEVNAME(sc)); 2473 goto freeme; 2474 } 2475 2476 switch (type) { 2477 case MFI_MGMT_VD: 2478 vdhs = bio_hs; 2479 vdhs->bv_status = BIOC_SVONLINE; 2480 vdhs->bv_size = pd->mpd_size * 512; /* bytes per block */ 2481 vdhs->bv_level = -1; /* hotspare */ 2482 vdhs->bv_nodisk = 1; 2483 break; 2484 2485 case MFI_MGMT_SD: 2486 sdhs = bio_hs; 2487 sdhs->bd_status = BIOC_SDHOTSPARE; 2488 sdhs->bd_size = pd->mpd_size * 512; /* bytes per block */ 2489 sdhs->bd_channel = pd->mpd_enc_idx; 2490 sdhs->bd_target = pd->mpd_enc_slot; 2491 inqbuf = (struct scsipi_inquiry_data *)&pd->mpd_inq_data; 2492 memcpy(vend, inqbuf->vendor, sizeof(vend) - 1); 2493 vend[sizeof vend - 1] = '\0'; 2494 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor)); 2495 break; 2496 2497 default: 2498 goto freeme; 2499 } 2500 2501 DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc)); 2502 rv = 0; 2503 freeme: 2504 free(pd, M_DEVBUF); 2505 free(cfg, M_DEVBUF); 2506 2507 return rv; 2508 } 2509 2510 static int 2511 mfi_destroy_sensors(struct mfi_softc *sc) 2512 { 2513 if (sc->sc_sme == NULL) 2514 return 0; 2515 sysmon_envsys_unregister(sc->sc_sme); 2516 sc->sc_sme = NULL; 2517 free(sc->sc_sensor, M_DEVBUF); 2518 return 0; 2519 } 2520 2521 static int 2522 mfi_create_sensors(struct mfi_softc *sc) 2523 { 2524 int i; 2525 int nsensors = sc->sc_ld_cnt + 1; 2526 int rv; 2527 2528 sc->sc_sme = sysmon_envsys_create(); 2529 sc->sc_sensor = malloc(sizeof(envsys_data_t) * nsensors, 2530 M_DEVBUF, M_WAITOK | M_ZERO); 2531 2532 /* BBU */ 2533 sc->sc_sensor[0].units = ENVSYS_INDICATOR; 2534 sc->sc_sensor[0].state = ENVSYS_SINVALID; 2535 sc->sc_sensor[0].value_cur = 0; 2536 /* Enable monitoring for BBU state changes, if present */ 2537 if (sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) 2538 sc->sc_sensor[0].flags |= ENVSYS_FMONCRITICAL; 2539 snprintf(sc->sc_sensor[0].desc, 2540 sizeof(sc->sc_sensor[0].desc), "%s BBU", DEVNAME(sc)); 2541 if (sysmon_envsys_sensor_attach(sc->sc_sme, 
&sc->sc_sensor[0])) 2542 goto out; 2543 2544 for (i = 1; i < nsensors; i++) { 2545 sc->sc_sensor[i].units = ENVSYS_DRIVE; 2546 sc->sc_sensor[i].state = ENVSYS_SINVALID; 2547 sc->sc_sensor[i].value_cur = ENVSYS_DRIVE_EMPTY; 2548 /* Enable monitoring for drive state changes */ 2549 sc->sc_sensor[i].flags |= ENVSYS_FMONSTCHANGED; 2550 /* logical drives */ 2551 snprintf(sc->sc_sensor[i].desc, 2552 sizeof(sc->sc_sensor[i].desc), "%s:%d", 2553 DEVNAME(sc), i - 1); 2554 if (sysmon_envsys_sensor_attach(sc->sc_sme, 2555 &sc->sc_sensor[i])) 2556 goto out; 2557 } 2558 2559 sc->sc_sme->sme_name = DEVNAME(sc); 2560 sc->sc_sme->sme_cookie = sc; 2561 sc->sc_sme->sme_refresh = mfi_sensor_refresh; 2562 rv = sysmon_envsys_register(sc->sc_sme); 2563 if (rv != 0) { 2564 aprint_error_dev(sc->sc_dev, 2565 "unable to register with sysmon (rv = %d)\n", rv); 2566 goto out; 2567 } 2568 return 0; 2569 2570 out: 2571 free(sc->sc_sensor, M_DEVBUF); 2572 sysmon_envsys_destroy(sc->sc_sme); 2573 sc->sc_sme = NULL; 2574 return EINVAL; 2575 } 2576 2577 static void 2578 mfi_sensor_refresh(struct sysmon_envsys *sme, envsys_data_t *edata) 2579 { 2580 struct mfi_softc *sc = sme->sme_cookie; 2581 struct bioc_vol bv; 2582 int s; 2583 int error; 2584 2585 if (edata->sensor >= sc->sc_ld_cnt + 1) 2586 return; 2587 2588 if (edata->sensor == 0) { 2589 /* BBU */ 2590 struct mfi_bbu_status bbu_stat; 2591 int bbu_status; 2592 if ((sc->sc_info.mci_hw_present & MFI_INFO_HW_BBU) == 0) 2593 return; 2594 2595 KERNEL_LOCK(1, curlwp); 2596 s = splbio(); 2597 bbu_status = mfi_get_bbu(sc, &bbu_stat); 2598 splx(s); 2599 KERNEL_UNLOCK_ONE(curlwp); 2600 switch(bbu_status) { 2601 case MFI_BBU_GOOD: 2602 edata->value_cur = 1; 2603 edata->state = ENVSYS_SVALID; 2604 if (!sc->sc_bbuok) 2605 aprint_normal_dev(sc->sc_dev, 2606 "BBU state changed to good\n"); 2607 sc->sc_bbuok = true; 2608 break; 2609 case MFI_BBU_BAD: 2610 edata->value_cur = 0; 2611 edata->state = ENVSYS_SCRITICAL; 2612 if (sc->sc_bbuok) 2613 
aprint_normal_dev(sc->sc_dev, 2614 "BBU state changed to bad\n"); 2615 sc->sc_bbuok = false; 2616 break; 2617 case MFI_BBU_UNKNOWN: 2618 default: 2619 edata->value_cur = 0; 2620 edata->state = ENVSYS_SINVALID; 2621 sc->sc_bbuok = false; 2622 break; 2623 } 2624 return; 2625 } 2626 2627 memset(&bv, 0, sizeof(bv)); 2628 bv.bv_volid = edata->sensor - 1; 2629 KERNEL_LOCK(1, curlwp); 2630 s = splbio(); 2631 error = mfi_ioctl_vol(sc, &bv); 2632 splx(s); 2633 KERNEL_UNLOCK_ONE(curlwp); 2634 if (error) 2635 bv.bv_status = BIOC_SVINVALID; 2636 2637 bio_vol_to_envsys(edata, &bv); 2638 } 2639 2640 #endif /* NBIO > 0 */ 2641 2642 static uint32_t 2643 mfi_xscale_fw_state(struct mfi_softc *sc) 2644 { 2645 return mfi_read(sc, MFI_OMSG0); 2646 } 2647 2648 static void 2649 mfi_xscale_intr_dis(struct mfi_softc *sc) 2650 { 2651 mfi_write(sc, MFI_OMSK, 0); 2652 } 2653 2654 static void 2655 mfi_xscale_intr_ena(struct mfi_softc *sc) 2656 { 2657 mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR); 2658 } 2659 2660 static int 2661 mfi_xscale_intr(struct mfi_softc *sc) 2662 { 2663 uint32_t status; 2664 2665 status = mfi_read(sc, MFI_OSTS); 2666 if (!ISSET(status, MFI_OSTS_INTR_VALID)) 2667 return 0; 2668 2669 /* write status back to acknowledge interrupt */ 2670 mfi_write(sc, MFI_OSTS, status); 2671 return 1; 2672 } 2673 2674 static void 2675 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb) 2676 { 2677 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames), 2678 ccb->ccb_pframe - MFIMEM_DVA(sc->sc_frames), 2679 sc->sc_frames_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2680 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_sense), 2681 ccb->ccb_psense - MFIMEM_DVA(sc->sc_sense), 2682 MFI_SENSE_SIZE, BUS_DMASYNC_PREREAD); 2683 2684 mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) | 2685 ccb->ccb_extra_frames); 2686 ccb->ccb_state = MFI_CCB_RUNNING; 2687 } 2688 2689 static uint32_t 2690 mfi_ppc_fw_state(struct mfi_softc *sc) 2691 { 2692 return mfi_read(sc, MFI_OSP); 2693 } 2694 2695 static 
void 2696 mfi_ppc_intr_dis(struct mfi_softc *sc) 2697 { 2698 /* Taking a wild guess --dyoung */ 2699 mfi_write(sc, MFI_OMSK, ~(uint32_t)0x0); 2700 mfi_write(sc, MFI_ODC, 0xffffffff); 2701 } 2702 2703 static void 2704 mfi_ppc_intr_ena(struct mfi_softc *sc) 2705 { 2706 mfi_write(sc, MFI_ODC, 0xffffffff); 2707 mfi_write(sc, MFI_OMSK, ~0x80000004); 2708 } 2709 2710 static int 2711 mfi_ppc_intr(struct mfi_softc *sc) 2712 { 2713 uint32_t status; 2714 2715 status = mfi_read(sc, MFI_OSTS); 2716 if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID)) 2717 return 0; 2718 2719 /* write status back to acknowledge interrupt */ 2720 mfi_write(sc, MFI_ODC, status); 2721 return 1; 2722 } 2723 2724 static void 2725 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb) 2726 { 2727 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe | 2728 (ccb->ccb_extra_frames << 1)); 2729 ccb->ccb_state = MFI_CCB_RUNNING; 2730 } 2731 2732 u_int32_t 2733 mfi_gen2_fw_state(struct mfi_softc *sc) 2734 { 2735 return (mfi_read(sc, MFI_OSP)); 2736 } 2737 2738 void 2739 mfi_gen2_intr_dis(struct mfi_softc *sc) 2740 { 2741 mfi_write(sc, MFI_OMSK, 0xffffffff); 2742 mfi_write(sc, MFI_ODC, 0xffffffff); 2743 } 2744 2745 void 2746 mfi_gen2_intr_ena(struct mfi_softc *sc) 2747 { 2748 mfi_write(sc, MFI_ODC, 0xffffffff); 2749 mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID); 2750 } 2751 2752 int 2753 mfi_gen2_intr(struct mfi_softc *sc) 2754 { 2755 u_int32_t status; 2756 2757 status = mfi_read(sc, MFI_OSTS); 2758 if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID)) 2759 return (0); 2760 2761 /* write status back to acknowledge interrupt */ 2762 mfi_write(sc, MFI_ODC, status); 2763 2764 return (1); 2765 } 2766 2767 void 2768 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb) 2769 { 2770 mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe | 2771 (ccb->ccb_extra_frames << 1)); 2772 ccb->ccb_state = MFI_CCB_RUNNING; 2773 } 2774 2775 u_int32_t 2776 mfi_skinny_fw_state(struct mfi_softc *sc) 2777 { 2778 return (mfi_read(sc, MFI_OSP)); 2779 } 
2780 2781 void 2782 mfi_skinny_intr_dis(struct mfi_softc *sc) 2783 { 2784 mfi_write(sc, MFI_OMSK, 0); 2785 } 2786 2787 void 2788 mfi_skinny_intr_ena(struct mfi_softc *sc) 2789 { 2790 mfi_write(sc, MFI_OMSK, ~0x00000001); 2791 } 2792 2793 int 2794 mfi_skinny_intr(struct mfi_softc *sc) 2795 { 2796 u_int32_t status; 2797 2798 status = mfi_read(sc, MFI_OSTS); 2799 if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID)) 2800 return (0); 2801 2802 /* write status back to acknowledge interrupt */ 2803 mfi_write(sc, MFI_OSTS, status); 2804 2805 return (1); 2806 } 2807 2808 void 2809 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb) 2810 { 2811 mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe | 2812 (ccb->ccb_extra_frames << 1)); 2813 mfi_write(sc, MFI_IQPH, 0x00000000); 2814 ccb->ccb_state = MFI_CCB_RUNNING; 2815 } 2816 2817 #define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000008) 2818 2819 void 2820 mfi_tbolt_intr_ena(struct mfi_softc *sc) 2821 { 2822 mfi_write(sc, MFI_OMSK, ~MFI_FUSION_ENABLE_INTERRUPT_MASK); 2823 mfi_read(sc, MFI_OMSK); 2824 } 2825 2826 void 2827 mfi_tbolt_intr_dis(struct mfi_softc *sc) 2828 { 2829 mfi_write(sc, MFI_OMSK, 0xFFFFFFFF); 2830 mfi_read(sc, MFI_OMSK); 2831 } 2832 2833 int 2834 mfi_tbolt_intr(struct mfi_softc *sc) 2835 { 2836 int32_t status; 2837 2838 status = mfi_read(sc, MFI_OSTS); 2839 2840 if (ISSET(status, 0x1)) { 2841 mfi_write(sc, MFI_OSTS, status); 2842 mfi_read(sc, MFI_OSTS); 2843 if (ISSET(status, MFI_STATE_CHANGE_INTERRUPT)) 2844 return 0; 2845 return 1; 2846 } 2847 if (!ISSET(status, MFI_FUSION_ENABLE_INTERRUPT_MASK)) 2848 return 0; 2849 mfi_read(sc, MFI_OSTS); 2850 return 1; 2851 } 2852 2853 u_int32_t 2854 mfi_tbolt_fw_state(struct mfi_softc *sc) 2855 { 2856 return mfi_read(sc, MFI_OSP); 2857 } 2858 2859 void 2860 mfi_tbolt_post(struct mfi_softc *sc, struct mfi_ccb *ccb) 2861 { 2862 if (sc->sc_MFA_enabled) { 2863 if ((ccb->ccb_flags & MFI_CCB_F_TBOLT) == 0) 2864 mfi_tbolt_build_mpt_ccb(ccb); 2865 mfi_write(sc, MFI_IQPL, 2866 
ccb->ccb_tb_request_desc.words & 0xFFFFFFFF); 2867 mfi_write(sc, MFI_IQPH, 2868 ccb->ccb_tb_request_desc.words >> 32); 2869 ccb->ccb_state = MFI_CCB_RUNNING; 2870 return; 2871 } 2872 uint64_t bus_add = ccb->ccb_pframe; 2873 bus_add |= (MFI_REQ_DESCRIPT_FLAGS_MFA 2874 << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2875 mfi_write(sc, MFI_IQPL, bus_add); 2876 mfi_write(sc, MFI_IQPH, bus_add >> 32); 2877 ccb->ccb_state = MFI_CCB_RUNNING; 2878 } 2879 2880 static void 2881 mfi_tbolt_build_mpt_ccb(struct mfi_ccb *ccb) 2882 { 2883 union mfi_mpi2_request_descriptor *req_desc = &ccb->ccb_tb_request_desc; 2884 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request; 2885 struct mpi25_ieee_sge_chain64 *mpi25_ieee_chain; 2886 2887 io_req->Function = MPI2_FUNCTION_PASSTHRU_IO_REQUEST; 2888 io_req->SGLOffset0 = 2889 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4; 2890 io_req->ChainOffset = 2891 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 16; 2892 2893 mpi25_ieee_chain = 2894 (struct mpi25_ieee_sge_chain64 *)&io_req->SGL.IeeeChain; 2895 mpi25_ieee_chain->Address = ccb->ccb_pframe; 2896 2897 /* 2898 In MFI pass thru, nextChainOffset will always be zero to 2899 indicate the end of the chain. 
2900 */ 2901 mpi25_ieee_chain->Flags= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT 2902 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; 2903 2904 /* setting the length to the maximum length */ 2905 mpi25_ieee_chain->Length = 1024; 2906 2907 req_desc->header.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 2908 MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 2909 ccb->ccb_flags |= MFI_CCB_F_TBOLT; 2910 bus_dmamap_sync(ccb->ccb_sc->sc_dmat, 2911 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool), 2912 ccb->ccb_tb_pio_request - 2913 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool), 2914 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, 2915 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2916 } 2917 2918 /* 2919 * Description: 2920 * This function will prepare message pools for the Thunderbolt controller 2921 */ 2922 static int 2923 mfi_tbolt_init_desc_pool(struct mfi_softc *sc) 2924 { 2925 uint32_t offset = 0; 2926 uint8_t *addr = MFIMEM_KVA(sc->sc_tbolt_reqmsgpool); 2927 2928 /* Request Decriptors alignment restrictions */ 2929 KASSERT(((uintptr_t)addr & 0xFF) == 0); 2930 2931 /* Skip request message pool */ 2932 addr = &addr[MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1)]; 2933 2934 /* Reply Frame Pool is initialized */ 2935 sc->sc_reply_frame_pool = (struct mfi_mpi2_reply_header *) addr; 2936 KASSERT(((uintptr_t)addr & 0xFF) == 0); 2937 2938 offset = (uintptr_t)sc->sc_reply_frame_pool 2939 - (uintptr_t)MFIMEM_KVA(sc->sc_tbolt_reqmsgpool); 2940 sc->sc_reply_frame_busaddr = 2941 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool) + offset; 2942 2943 /* initializing reply address to 0xFFFFFFFF */ 2944 memset((uint8_t *)sc->sc_reply_frame_pool, 0xFF, 2945 (MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size)); 2946 2947 /* Skip Reply Frame Pool */ 2948 addr += MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; 2949 sc->sc_reply_pool_limit = (void *)addr; 2950 2951 offset = MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size; 2952 sc->sc_sg_frame_busaddr = sc->sc_reply_frame_busaddr + offset; 2953 2954 /* initialize the 
last_reply_idx to 0 */ 2955 sc->sc_last_reply_idx = 0; 2956 offset = (sc->sc_sg_frame_busaddr + (MEGASAS_MAX_SZ_CHAIN_FRAME * 2957 sc->sc_max_cmds)) - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool); 2958 KASSERT(offset <= sc->sc_tbolt_reqmsgpool->am_size); 2959 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 0, 2960 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool)->dm_mapsize, 2961 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2962 return 0; 2963 } 2964 2965 /* 2966 * This routine prepare and issue INIT2 frame to the Firmware 2967 */ 2968 2969 static int 2970 mfi_tbolt_init_MFI_queue(struct mfi_softc *sc) 2971 { 2972 struct mpi2_ioc_init_request *mpi2IocInit; 2973 struct mfi_init_frame *mfi_init; 2974 struct mfi_ccb *ccb; 2975 bus_addr_t phyAddress; 2976 mfi_address *mfiAddressTemp; 2977 int s; 2978 char *verbuf; 2979 char wqbuf[10]; 2980 2981 /* Check if initialization is already completed */ 2982 if (sc->sc_MFA_enabled) { 2983 return 1; 2984 } 2985 2986 mpi2IocInit = 2987 (struct mpi2_ioc_init_request *)MFIMEM_KVA(sc->sc_tbolt_ioc_init); 2988 2989 s = splbio(); 2990 if ((ccb = mfi_get_ccb(sc)) == NULL) { 2991 splx(s); 2992 return (EBUSY); 2993 } 2994 2995 2996 mfi_init = &ccb->ccb_frame->mfr_init; 2997 2998 memset(mpi2IocInit, 0, sizeof(struct mpi2_ioc_init_request)); 2999 mpi2IocInit->Function = MPI2_FUNCTION_IOC_INIT; 3000 mpi2IocInit->WhoInit = MPI2_WHOINIT_HOST_DRIVER; 3001 3002 /* set MsgVersion and HeaderVersion host driver was built with */ 3003 mpi2IocInit->MsgVersion = MPI2_VERSION; 3004 mpi2IocInit->HeaderVersion = MPI2_HEADER_VERSION; 3005 mpi2IocInit->SystemRequestFrameSize = MEGASAS_THUNDERBOLT_NEW_MSG_SIZE/4; 3006 mpi2IocInit->ReplyDescriptorPostQueueDepth = 3007 (uint16_t)sc->sc_reply_pool_size; 3008 mpi2IocInit->ReplyFreeQueueDepth = 0; /* Not supported by MR. 
*/ 3009 3010 /* Get physical address of reply frame pool */ 3011 phyAddress = sc->sc_reply_frame_busaddr; 3012 mfiAddressTemp = 3013 (mfi_address *)&mpi2IocInit->ReplyDescriptorPostQueueAddress; 3014 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; 3015 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); 3016 3017 /* Get physical address of request message pool */ 3018 phyAddress = MFIMEM_DVA(sc->sc_tbolt_reqmsgpool); 3019 mfiAddressTemp = (mfi_address *)&mpi2IocInit->SystemRequestFrameBaseAddress; 3020 mfiAddressTemp->u.addressLow = (uint32_t)phyAddress; 3021 mfiAddressTemp->u.addressHigh = (uint32_t)((uint64_t)phyAddress >> 32); 3022 3023 mpi2IocInit->ReplyFreeQueueAddress = 0; /* Not supported by MR. */ 3024 mpi2IocInit->TimeStamp = time_uptime; 3025 3026 verbuf = MFIMEM_KVA(sc->sc_tbolt_verbuf); 3027 snprintf(verbuf, strlen(MEGASAS_VERSION) + 2, "%s\n", 3028 MEGASAS_VERSION); 3029 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0, 3030 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_PREWRITE); 3031 mfi_init->driver_ver_lo = htole32(MFIMEM_DVA(sc->sc_tbolt_verbuf)); 3032 mfi_init->driver_ver_hi = 3033 htole32((uint64_t)MFIMEM_DVA(sc->sc_tbolt_verbuf) >> 32); 3034 3035 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0, 3036 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize, 3037 BUS_DMASYNC_PREWRITE); 3038 /* Get the physical address of the mpi2 ioc init command */ 3039 phyAddress = MFIMEM_DVA(sc->sc_tbolt_ioc_init); 3040 mfi_init->mif_qinfo_new_addr_lo = htole32(phyAddress); 3041 mfi_init->mif_qinfo_new_addr_hi = htole32((uint64_t)phyAddress >> 32); 3042 3043 mfi_init->mif_header.mfh_cmd = MFI_CMD_INIT; 3044 mfi_init->mif_header.mfh_data_len = sizeof(struct mpi2_ioc_init_request); 3045 if (mfi_poll(ccb) != 0) { 3046 aprint_error_dev(sc->sc_dev, "failed to send IOC init2 " 3047 "command at 0x%" PRIx64 "\n", 3048 (uint64_t)ccb->ccb_pframe); 3049 splx(s); 3050 return 1; 3051 } 3052 
bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_verbuf), 0, 3053 MFIMEM_MAP(sc->sc_tbolt_verbuf)->dm_mapsize, BUS_DMASYNC_POSTWRITE); 3054 bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_tbolt_ioc_init), 0, 3055 MFIMEM_MAP(sc->sc_tbolt_ioc_init)->dm_mapsize, 3056 BUS_DMASYNC_POSTWRITE); 3057 mfi_put_ccb(ccb); 3058 splx(s); 3059 3060 if (mfi_init->mif_header.mfh_cmd_status == 0) { 3061 sc->sc_MFA_enabled = 1; 3062 } 3063 else { 3064 aprint_error_dev(sc->sc_dev, "Init command Failed %x\n", 3065 mfi_init->mif_header.mfh_cmd_status); 3066 return 1; 3067 } 3068 3069 snprintf(wqbuf, sizeof(wqbuf), "%swq", DEVNAME(sc)); 3070 if (workqueue_create(&sc->sc_ldsync_wq, wqbuf, mfi_tbolt_sync_map_info, 3071 sc, PRIBIO, IPL_BIO, 0) != 0) { 3072 aprint_error_dev(sc->sc_dev, "workqueue_create failed\n"); 3073 return 1; 3074 } 3075 workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL); 3076 return 0; 3077 } 3078 3079 int 3080 mfi_tbolt_intrh(void *arg) 3081 { 3082 struct mfi_softc *sc = arg; 3083 struct mfi_ccb *ccb; 3084 union mfi_mpi2_reply_descriptor *desc; 3085 int smid, num_completed; 3086 3087 if (!mfi_tbolt_intr(sc)) 3088 return 0; 3089 3090 DNPRINTF(MFI_D_INTR, "%s: mfi_tbolt_intrh %#lx %#lx\n", DEVNAME(sc), 3091 (u_long)sc, (u_long)sc->sc_last_reply_idx); 3092 3093 KASSERT(sc->sc_last_reply_idx < sc->sc_reply_pool_size); 3094 3095 desc = (union mfi_mpi2_reply_descriptor *) 3096 ((uintptr_t)sc->sc_reply_frame_pool + 3097 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE); 3098 3099 bus_dmamap_sync(sc->sc_dmat, 3100 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 3101 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1), 3102 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size, 3103 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3104 num_completed = 0; 3105 while ((desc->header.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK) != 3106 MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { 3107 smid = desc->header.SMID; 3108 KASSERT(smid > 0 && smid <= sc->sc_max_cmds); 3109 ccb = 
&sc->sc_ccb[smid - 1]; 3110 DNPRINTF(MFI_D_INTR, 3111 "%s: mfi_tbolt_intr SMID %#x reply_idx %#x " 3112 "desc %#" PRIx64 " ccb %p\n", DEVNAME(sc), smid, 3113 sc->sc_last_reply_idx, desc->words, ccb); 3114 KASSERT(ccb->ccb_state == MFI_CCB_RUNNING); 3115 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO && 3116 ccb->ccb_tb_io_request->ChainOffset != 0) { 3117 bus_dmamap_sync(sc->sc_dmat, 3118 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 3119 ccb->ccb_tb_psg_frame - 3120 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool), 3121 MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_POSTREAD); 3122 } 3123 if (ccb->ccb_flags & MFI_CCB_F_TBOLT_IO) { 3124 bus_dmamap_sync(sc->sc_dmat, 3125 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 3126 ccb->ccb_tb_pio_request - 3127 MFIMEM_DVA(sc->sc_tbolt_reqmsgpool), 3128 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, 3129 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3130 } 3131 if (ccb->ccb_done) 3132 ccb->ccb_done(ccb); 3133 else 3134 ccb->ccb_state = MFI_CCB_DONE; 3135 sc->sc_last_reply_idx++; 3136 if (sc->sc_last_reply_idx >= sc->sc_reply_pool_size) { 3137 sc->sc_last_reply_idx = 0; 3138 } 3139 desc->words = ~0x0; 3140 /* Get the next reply descriptor */ 3141 desc = (union mfi_mpi2_reply_descriptor *) 3142 ((uintptr_t)sc->sc_reply_frame_pool + 3143 sc->sc_last_reply_idx * MEGASAS_THUNDERBOLT_REPLY_SIZE); 3144 num_completed++; 3145 } 3146 if (num_completed == 0) 3147 return 0; 3148 3149 bus_dmamap_sync(sc->sc_dmat, 3150 MFIMEM_MAP(sc->sc_tbolt_reqmsgpool), 3151 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE * (sc->sc_max_cmds + 1), 3152 MEGASAS_THUNDERBOLT_REPLY_SIZE * sc->sc_reply_pool_size, 3153 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3154 mfi_write(sc, MFI_RPI, sc->sc_last_reply_idx); 3155 return 1; 3156 } 3157 3158 3159 int 3160 mfi_tbolt_scsi_ld_io(struct mfi_ccb *ccb, struct scsipi_xfer *xs, 3161 uint64_t blockno, uint32_t blockcnt) 3162 { 3163 struct scsipi_periph *periph = xs->xs_periph; 3164 struct mfi_mpi2_request_raid_scsi_io *io_req; 3165 int sge_count; 3166 3167 DNPRINTF(MFI_D_CMD, "%s: 
mfi_tbolt_scsi_ld_io: %d\n", 3168 device_xname(periph->periph_channel->chan_adapter->adapt_dev), 3169 periph->periph_target); 3170 3171 if (!xs->data) 3172 return 1; 3173 3174 ccb->ccb_done = mfi_tbolt_scsi_ld_done; 3175 ccb->ccb_xs = xs; 3176 ccb->ccb_data = xs->data; 3177 ccb->ccb_len = xs->datalen; 3178 3179 io_req = ccb->ccb_tb_io_request; 3180 3181 /* Just the CDB length,rest of the Flags are zero */ 3182 io_req->IoFlags = xs->cmdlen; 3183 memset(io_req->CDB.CDB32, 0, 32); 3184 memcpy(io_req->CDB.CDB32, &xs->cmdstore, xs->cmdlen); 3185 3186 io_req->RaidContext.TargetID = periph->periph_target; 3187 io_req->RaidContext.Status = 0; 3188 io_req->RaidContext.exStatus = 0; 3189 io_req->RaidContext.timeoutValue = MFI_FUSION_FP_DEFAULT_TIMEOUT; 3190 io_req->Function = MPI2_FUNCTION_LD_IO_REQUEST; 3191 io_req->DevHandle = periph->periph_target; 3192 3193 ccb->ccb_tb_request_desc.header.RequestFlags = 3194 (MFI_REQ_DESCRIPT_FLAGS_LD_IO << MFI_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 3195 io_req->DataLength = blockcnt * MFI_SECTOR_LEN; 3196 3197 if (xs->xs_control & XS_CTL_DATA_IN) { 3198 io_req->Control = MPI2_SCSIIO_CONTROL_READ; 3199 ccb->ccb_direction = MFI_DATA_IN; 3200 } else { 3201 io_req->Control = MPI2_SCSIIO_CONTROL_WRITE; 3202 ccb->ccb_direction = MFI_DATA_OUT; 3203 } 3204 3205 sge_count = mfi_tbolt_create_sgl(ccb, 3206 (xs->xs_control & XS_CTL_NOSLEEP) ? 
BUS_DMA_NOWAIT : BUS_DMA_WAITOK 3207 ); 3208 if (sge_count < 0) 3209 return 1; 3210 KASSERT(sge_count <= ccb->ccb_sc->sc_max_sgl); 3211 io_req->RaidContext.numSGE = sge_count; 3212 io_req->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3213 io_req->SGLOffset0 = 3214 offsetof(struct mfi_mpi2_request_raid_scsi_io, SGL) / 4; 3215 3216 io_req->SenseBufferLowAddress = htole32(ccb->ccb_psense); 3217 io_req->SenseBufferLength = MFI_SENSE_SIZE; 3218 3219 ccb->ccb_flags |= MFI_CCB_F_TBOLT | MFI_CCB_F_TBOLT_IO; 3220 bus_dmamap_sync(ccb->ccb_sc->sc_dmat, 3221 MFIMEM_MAP(ccb->ccb_sc->sc_tbolt_reqmsgpool), 3222 ccb->ccb_tb_pio_request - 3223 MFIMEM_DVA(ccb->ccb_sc->sc_tbolt_reqmsgpool), 3224 MEGASAS_THUNDERBOLT_NEW_MSG_SIZE, 3225 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3226 3227 return 0; 3228 } 3229 3230 3231 static void 3232 mfi_tbolt_scsi_ld_done(struct mfi_ccb *ccb) 3233 { 3234 struct mfi_mpi2_request_raid_scsi_io *io_req = ccb->ccb_tb_io_request; 3235 mfi_scsi_xs_done(ccb, io_req->RaidContext.Status, 3236 io_req->RaidContext.exStatus); 3237 } 3238 3239 static int 3240 mfi_tbolt_create_sgl(struct mfi_ccb *ccb, int flags) 3241 { 3242 struct mfi_softc *sc = ccb->ccb_sc; 3243 bus_dma_segment_t *sgd; 3244 int error, i, sge_idx, sge_count; 3245 struct mfi_mpi2_request_raid_scsi_io *io_req; 3246 struct mpi25_ieee_sge_chain64 *sgl_ptr; 3247 3248 DNPRINTF(MFI_D_DMA, "%s: mfi_tbolt_create_sgl %#lx\n", DEVNAME(sc), 3249 (u_long)ccb->ccb_data); 3250 3251 if (!ccb->ccb_data) 3252 return -1; 3253 3254 KASSERT(flags == BUS_DMA_NOWAIT || !cpu_intr_p()); 3255 error = bus_dmamap_load(sc->sc_datadmat, ccb->ccb_dmamap, 3256 ccb->ccb_data, ccb->ccb_len, NULL, flags); 3257 if (error) { 3258 if (error == EFBIG) 3259 aprint_error_dev(sc->sc_dev, "more than %d dma segs\n", 3260 sc->sc_max_sgl); 3261 else 3262 aprint_error_dev(sc->sc_dev, 3263 "error %d loading dma map\n", error); 3264 return -1; 3265 } 3266 3267 io_req = ccb->ccb_tb_io_request; 3268 sgl_ptr = &io_req->SGL.IeeeChain.Chain64; 
	sge_count = ccb->ccb_dmamap->dm_nsegs;
	sgd = ccb->ccb_dmamap->dm_segs;
	KASSERT(sge_count <= sc->sc_max_sgl);
	KASSERT(sge_count <=
	    (MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1 +
	    MEGASAS_THUNDERBOLT_MAX_SGE_IN_CHAINMSG));

	if (sge_count > MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG) {
		/* One element to store the chain info */
		sge_idx = MEGASAS_THUNDERBOLT_MAX_SGE_IN_MAINMSG - 1;
		DNPRINTF(MFI_D_DMA,
		    "mfi sge_idx %d sge_count %d io_req paddr 0x%" PRIx64 "\n",
		    sge_idx, sge_count, ccb->ccb_tb_pio_request);
	} else {
		sge_idx = sge_count;
	}

	/* Fill the SG elements that fit in the main message frame. */
	for (i = 0; i < sge_idx; i++) {
		sgl_ptr->Address = htole64(sgd[i].ds_addr);
		sgl_ptr->Length = htole32(sgd[i].ds_len);
		sgl_ptr->Flags = 0;
		if (sge_idx < sge_count) {
			DNPRINTF(MFI_D_DMA,
			    "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
			    " flags 0x%x\n", sgl_ptr, i,
			    sgl_ptr->Address, sgl_ptr->Length,
			    sgl_ptr->Flags);
		}
		sgl_ptr++;
	}
	io_req->ChainOffset = 0;
	if (sge_idx < sge_count) {
		/* Remaining elements spill into a separate chain frame. */
		struct mpi25_ieee_sge_chain64 *sg_chain;
		io_req->ChainOffset = MEGASAS_THUNDERBOLT_CHAIN_OFF_MAINMSG;
		sg_chain = sgl_ptr;
		/* Prepare chain element */
		sg_chain->NextChainOffset = 0;
		sg_chain->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
		/*
		 * NOTE(review): Length and Address below are stored without
		 * htole32/htole64, unlike the plain SG elements above --
		 * confirm whether the chain element is expected in host or
		 * little-endian byte order.
		 */
		sg_chain->Length = (sizeof(mpi2_sge_io_union) *
		    (sge_count - sge_idx));
		sg_chain->Address = ccb->ccb_tb_psg_frame;
		DNPRINTF(MFI_D_DMA,
		    "sgl %p chain 0x%" PRIx64 " len 0x%" PRIx32
		    " flags 0x%x\n", sg_chain, sg_chain->Address,
		    sg_chain->Length, sg_chain->Flags);
		sgl_ptr = &ccb->ccb_tb_sg_frame->IeeeChain.Chain64;
		for (; i < sge_count; i++) {
			sgl_ptr->Address = htole64(sgd[i].ds_addr);
			sgl_ptr->Length = htole32(sgd[i].ds_len);
			sgl_ptr->Flags = 0;
			DNPRINTF(MFI_D_DMA,
			    "sgl %p %d 0x%" PRIx64 " len 0x%" PRIx32
			    " flags 0x%x\n", sgl_ptr, i, sgl_ptr->Address,
			    sgl_ptr->Length, sgl_ptr->Flags);
			sgl_ptr++;
		}
		/*
		 * Flush the chain frame out for the controller.
		 * NOTE(review): the host writes this frame for the device to
		 * read, yet only BUS_DMASYNC_PREREAD is used here (the
		 * request-frame sync above uses PREREAD|PREWRITE) -- verify
		 * this is the intended sync op.
		 */
		bus_dmamap_sync(sc->sc_dmat,
		    MFIMEM_MAP(sc->sc_tbolt_reqmsgpool),
		    ccb->ccb_tb_psg_frame - MFIMEM_DVA(sc->sc_tbolt_reqmsgpool),
		    MEGASAS_MAX_SZ_CHAIN_FRAME, BUS_DMASYNC_PREREAD);
	}

	/* Sync the data buffer for the upcoming DMA transfer. */
	if (ccb->ccb_direction == MFI_DATA_IN) {
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_datadmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}
	return sge_count;
}

/*
 * The ThunderBolt HW has an option for the driver to directly
 * access the underlying disks and operate on the RAID.  To
 * do this there needs to be a capability to keep the RAID controller
 * and driver in sync.  The FreeBSD driver does not take advantage
 * of this feature since it adds a lot of complexity and slows down
 * performance.  Performance is gained by using the controller's
 * cache etc.
 *
 * Even though this driver doesn't access the disks directly, an
 * AEN like command is used to inform the RAID firmware to "sync"
 * with all LD's via the MFI_DCMD_LD_MAP_GET_INFO command.  This
 * command in write mode will return when the RAID firmware has
 * detected a change to the RAID state.  Examples of this type
 * of change are removing a disk.  Once the command returns then
 * the driver needs to acknowledge this and "sync" all LD's again.
 * This repeats until we shutdown.  Then we need to cancel this
 * pending command.
 *
 * If this is not done right the RAID firmware will not remove a
 * pulled drive and the RAID won't go degraded etc.  Effectively,
 * stopping any RAID management functions.
 *
 * Doing another LD sync requires the use of an event, since the
 * driver needs to do a mfi_wait_command and can't do that in an
 * interrupt thread.
 *
 * The driver could get the RAID state via the MFI_DCMD_LD_MAP_GET_INFO,
 * but that requires a bunch of structure and it is simpler to just do
 * the MFI_DCMD_LD_GET_LIST versus walking the RAID map.
 */

/*
 * Workqueue function: (re)arm the firmware "sync map" command so the
 * driver is notified of RAID state changes (e.g. a pulled drive).
 * On failure it pauses a second and retries from scratch via "again".
 */
void
mfi_tbolt_sync_map_info(struct work *w, void *v)
{
	struct mfi_softc *sc = v;
	int i;
	struct mfi_ccb *ccb = NULL;
	uint8_t mbox[MFI_MBOX_SIZE];
	struct mfi_ld *ld_sync;
	size_t ld_size;
	int s;

	DNPRINTF(MFI_D_SYNC, "%s: mfi_tbolt_sync_map_info\n", DEVNAME(sc));
again:
	ld_sync = NULL;
	s = splbio();
	/* Only one sync command may be outstanding at a time. */
	if (sc->sc_ldsync_ccb != NULL) {
		splx(s);
		return;
	}

	/*
	 * NOTE(review): the calls below (mfi_mgmt_internal and
	 * malloc(M_WAITOK)) can sleep while we are still at splbio --
	 * confirm this ordering is intended.
	 */
	if (mfi_mgmt_internal(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL, false)) {
		aprint_error_dev(sc->sc_dev, "MR_DCMD_LD_GET_LIST failed\n");
		goto err;
	}

	ld_size = sizeof(*ld_sync) * sc->sc_ld_list.mll_no_ld;

	/*
	 * NOTE(review): malloc() with M_WAITOK does not return NULL, so
	 * this error branch is effectively dead code.
	 */
	ld_sync = malloc(ld_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (ld_sync == NULL) {
		aprint_error_dev(sc->sc_dev, "Failed to allocate sync\n");
		goto err;
	}
	/* Snapshot the LD list for the firmware to compare against. */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		ld_sync[i] = sc->sc_ld_list.mll_list[i].mll_ld;
	}

	if ((ccb = mfi_get_ccb(sc)) == NULL) {
		aprint_error_dev(sc->sc_dev, "Failed to get sync command\n");
		goto err;
	}
	sc->sc_ldsync_ccb = ccb;

	memset(mbox, 0, MFI_MBOX_SIZE);
	mbox[0] = sc->sc_ld_list.mll_no_ld;
	/* Pend flag: command completes only when the RAID state changes. */
	mbox[1] = MFI_DCMD_MBOX_PEND_FLAG;
	if (mfi_mgmt(ccb, NULL, MR_DCMD_LD_MAP_GET_INFO, MFI_DATA_OUT,
	    ld_size, ld_sync, mbox)) {
		aprint_error_dev(sc->sc_dev, "Failed to create sync command\n");
		goto err;
	}
	/*
	 * we won't sleep on this command, so we have to override
	 * the callback set up by mfi_mgmt()
	 */
	ccb->ccb_done = mfi_sync_map_complete;

	mfi_post(sc, ccb);
	splx(s);
	return;

err:
	if (ld_sync)
		free(ld_sync, M_DEVBUF);
	if (ccb)
		mfi_put_ccb(ccb);
	sc->sc_ldsync_ccb = NULL;
	splx(s);
	/* Back off for a second, then retry the whole sequence. */
	kpause("ldsyncp", 0, hz, NULL);
	goto again;
}

/*
 * Completion handler for the pending "sync map" command: release its
 * resources and, unless the adapter is shutting down or the command
 * failed, requeue the sync work so the next RAID state change is
 * caught as well.
 */
static void
mfi_sync_map_complete(struct mfi_ccb *ccb)
{
	struct mfi_softc *sc = ccb->ccb_sc;
	bool aborted = !sc->sc_running;

	DNPRINTF(MFI_D_SYNC, "%s: mfi_sync_map_complete\n",
	    DEVNAME(ccb->ccb_sc));
	KASSERT(sc->sc_ldsync_ccb == ccb);
	mfi_mgmt_done(ccb);
	/* ccb_data is the ld_sync array allocated by the work function. */
	free(ccb->ccb_data, M_DEVBUF);
	if (ccb->ccb_flags & MFI_CCB_F_ERR) {
		aprint_error_dev(sc->sc_dev, "sync command failed\n");
		aborted = true;
	}
	mfi_put_ccb(ccb);
	sc->sc_ldsync_ccb = NULL;

	/* set it up again so the driver can catch more events */
	if (!aborted) {
		workqueue_enqueue(sc->sc_ldsync_wq, &sc->sc_ldsync_wk, NULL);
	}
}

/* Open the management device node; just verify the unit exists. */
static int
mfifopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct mfi_softc *sc;

	if ((sc = device_lookup_private(&mfi_cd, minor(dev))) == NULL)
		return (ENXIO);
	return (0);
}

/* Close the management device node; nothing to tear down. */
static int
mfifclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	return (0);
}

/*
 * Management ioctl: pass a user-supplied MFI frame (MFI_CMD) through
 * to the controller, shuttling data and sense information between
 * user space and a kernel bounce buffer.
 */
static int
mfifioctl(dev_t dev, u_long cmd, void *data, int flag,
    struct lwp *l)
{
	struct mfi_softc *sc;
	struct mfi_ioc_packet *ioc = data;
	uint8_t *udata;
	struct mfi_ccb *ccb = NULL;
	int ctx, i, s, error;
	union mfi_sense_ptr sense_ptr;

	/* Resolve the adapter first; anything but MFI_CMD is rejected. */
	switch (cmd) {
	case MFI_CMD:
		sc = device_lookup_private(&mfi_cd, ioc->mfi_adapter_no);
		break;
	default:
		return ENOTTY;
	}
	if (sc == NULL)
		return (ENXIO);
	if (sc->sc_opened)
		return (EBUSY);

	switch (cmd) {
	case MFI_CMD:
		error =
kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (error)
			return error;
		if (ioc->mfi_sge_count > MAX_IOCTL_SGE)
			return EINVAL;
		s = splbio();
		/*
		 * NOTE(review): this error return leaves the spl raised --
		 * looks like a missing splx(s) before returning ENOMEM.
		 */
		if ((ccb = mfi_get_ccb(sc)) == NULL)
			return ENOMEM;
		ccb->ccb_data = NULL;
		/* Preserve the driver-owned context across the frame copy. */
		ctx = ccb->ccb_frame->mfr_header.mfh_context;
		memcpy(ccb->ccb_frame, ioc->mfi_frame.raw,
		    sizeof(*ccb->ccb_frame));
		ccb->ccb_frame->mfr_header.mfh_context = ctx;
		ccb->ccb_frame->mfr_header.mfh_scsi_status = 0;
		ccb->ccb_frame->mfr_header.mfh_pad0 = 0;
		ccb->ccb_frame_size =
		    (sizeof(union mfi_sgl) * ioc->mfi_sge_count) +
		    ioc->mfi_sgl_off;
		if (ioc->mfi_sge_count > 0) {
			ccb->ccb_sgl = (union mfi_sgl *)
			    &ccb->ccb_frame->mfr_bytes[ioc->mfi_sgl_off];
		}
		if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_READ)
			ccb->ccb_direction = MFI_DATA_IN;
		if (ccb->ccb_frame->mfr_header.mfh_flags & MFI_FRAME_DIR_WRITE)
			ccb->ccb_direction = MFI_DATA_OUT;
		ccb->ccb_len = ccb->ccb_frame->mfr_header.mfh_data_len;
		if (ccb->ccb_len > MAXPHYS) {
			error = ENOMEM;
			goto out;
		}
		if (ccb->ccb_len &&
		    (ccb->ccb_direction & (MFI_DATA_IN | MFI_DATA_OUT)) != 0) {
			/* Kernel bounce buffer for the user data. */
			udata = malloc(ccb->ccb_len, M_DEVBUF, M_WAITOK|M_ZERO);
			if (udata == NULL) {
				error = ENOMEM;
				goto out;
			}
			ccb->ccb_data = udata;
			if (ccb->ccb_direction & MFI_DATA_OUT) {
				/*
				 * NOTE(review): the summed user-supplied
				 * iov_len values are never checked against
				 * ccb_len, so an oversized SG list could
				 * overrun udata -- verify this is bounded
				 * elsewhere.
				 */
				for (i = 0; i < ioc->mfi_sge_count; i++) {
					error = copyin(ioc->mfi_sgl[i].iov_base,
					    udata, ioc->mfi_sgl[i].iov_len);
					if (error)
						goto out;
					udata = &udata[
					    ioc->mfi_sgl[i].iov_len];
				}
			}
			if (mfi_create_sgl(ccb, BUS_DMA_WAITOK)) {
				error = EIO;
				goto out;
			}
		}
		/* Physical-disk SCSI I/O gets the ccb's sense buffer. */
		if (ccb->ccb_frame->mfr_header.mfh_cmd == MFI_CMD_PD_SCSI_IO) {
			ccb->ccb_frame->mfr_io.mif_sense_addr_lo =
			    htole32(ccb->ccb_psense);
			ccb->ccb_frame->mfr_io.mif_sense_addr_hi = 0;
		}
		ccb->ccb_done = mfi_mgmt_done;
		mfi_post(sc, ccb);
		/* Wait (still at splbio) for the command to complete. */
		while (ccb->ccb_state != MFI_CCB_DONE)
			tsleep(ccb, PRIBIO, "mfi_fioc", 0);

		if (ccb->ccb_direction & MFI_DATA_IN) {
			/* Copy the result back out to the user SG list. */
			udata = ccb->ccb_data;
			for (i = 0; i < ioc->mfi_sge_count; i++) {
				error = copyout(udata,
				    ioc->mfi_sgl[i].iov_base,
				    ioc->mfi_sgl[i].iov_len);
				if (error)
					goto out;
				udata = &udata[
				    ioc->mfi_sgl[i].iov_len];
			}
		}
		if (ioc->mfi_sense_len) {
			/*
			 * The user's sense destination pointer is embedded
			 * in the frame at mfi_sense_off; extract it, then
			 * copy the ccb's sense data out to it.
			 */
			memcpy(&sense_ptr.sense_ptr_data[0],
			    &ioc->mfi_frame.raw[ioc->mfi_sense_off],
			    sizeof(sense_ptr.sense_ptr_data));
			error = copyout(ccb->ccb_sense,
			    sense_ptr.user_space,
			    sizeof(sense_ptr.sense_ptr_data));
			if (error)
				goto out;
		}
		/* Return the completed frame (status etc.) to the caller. */
		memcpy(ioc->mfi_frame.raw, ccb->ccb_frame,
		    sizeof(*ccb->ccb_frame));
		break;
	default:
		/*
		 * NOTE(review): unreachable -- the first switch already
		 * returned ENOTTY for anything other than MFI_CMD.
		 */
		printf("mfifioctl unhandled cmd 0x%lx\n", cmd);
		return ENOTTY;
	}

out:
	/*
	 * NOTE(review): ccb is dereferenced here before the "if (ccb)"
	 * test below.  Every path reaching "out" has a valid ccb, so the
	 * test is redundant rather than protective (and free(NULL) would
	 * need no guard anyway) -- consider cleaning this up.
	 */
	if (ccb->ccb_data)
		free(ccb->ccb_data, M_DEVBUF);
	if (ccb)
		mfi_put_ccb(ccb);
	splx(s);
	return error;
}