/*	$OpenBSD: mpii.c,v 1.115 2018/08/14 05:22:21 jmatthew Exp $	*/
/*
 * Copyright (c) 2010, 2012 Mike Belopuhov
 * Copyright (c) 2009 James Giannoules
 * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rwlock.h>
#include <sys/sensors.h>
#include <sys/dkio.h>
#include <sys/tree.h>
#include <sys/task.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/biovar.h>

#include <dev/pci/mpiireg.h>

/* #define MPII_DEBUG */
#ifdef MPII_DEBUG
#define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
#define	MPII_D_CMD		(0x0001)
#define	MPII_D_INTR		(0x0002)
#define	MPII_D_MISC		(0x0004)
#define	MPII_D_DMA		(0x0008)
#define	MPII_D_IOCTL		(0x0010)
#define	MPII_D_RW		(0x0020)
#define	MPII_D_MEM		(0x0040)
#define	MPII_D_CCB		(0x0080)
#define	MPII_D_PPR		(0x0100)
#define	MPII_D_RAID		(0x0200)
#define	MPII_D_EVT		(0x0400)
#define	MPII_D_CFG		(0x0800)
#define	MPII_D_MAP		(0x1000)

/* when MPII_DEBUG is defined, every debug class is enabled by default */
u_int32_t  mpii_debug = 0
		| MPII_D_CMD
		| MPII_D_INTR
		| MPII_D_MISC
		| MPII_D_DMA
		| MPII_D_IOCTL
		| MPII_D_RW
		| MPII_D_MEM
		| MPII_D_CCB
		| MPII_D_PPR
		| MPII_D_RAID
		| MPII_D_EVT
		| MPII_D_CFG
		| MPII_D_MAP
	;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif

#define MPII_REQUEST_SIZE		(512)
#define MPII_REQUEST_CREDIT		(128)

/* a single bus_dma allocation: map, backing segment, size and kva */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)

struct mpii_softc;

/* reply control block: tracks one hardware reply frame (kva + dva) */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;
	u_int32_t		rcb_reply_dva;
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);

/* per-target bookkeeping for devices discovered behind the controller */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;
	u_int16_t		dev_handle;
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};

/* per-command control block; one per request frame slot (smid - 1) */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;
	bus_dmamap_t		ccb_dmamap;

	bus_addr_t		ccb_offset;
	void			*ccb_cmd;
	bus_addr_t		ccb_cmd_dva;
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;

	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);

/* per-controller state */
struct mpii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;

	struct scsi_link	sc_link;

	int			sc_flags;
#define MPII_F_RAID		(1<<1)
#define MPII_F_SAS3		(1<<2)
#define MPII_F_CONFIG_PENDING	(1<<3)

	struct scsibus_softc	*sc_scsibus;

	/* sc_max_devices entries, indexed by scsi target id */
	struct mpii_device	**sc_devs;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;
	struct mutex		sc_rep_mtx;

	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	ushort			sc_chain_sge;
	ushort			sc_max_sgl;

	u_int8_t		sc_ioc_event_replay;

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
	/*
	 * this protects the ccb state and list entry
	 * between mpii_scsi_cmd and scsidone.
	 */

	struct mpii_ccb_list	sc_ccb_tmos;
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	struct mpii_dmamem	*sc_requests;

	struct mpii_dmamem	*sc_replies;
	struct mpii_rcb		*sc_rcbs;

	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};

int	mpii_match(struct device *, void *, void *);
void	mpii_attach(struct device *, struct device *, void *);
int	mpii_detach(struct device *, int);

int	mpii_intr(void *);

struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};

struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};

void		mpii_scsi_cmd(struct scsi_xfer *);
void		mpii_scsi_cmd_done(struct mpii_ccb *);
int		mpii_scsi_probe(struct scsi_link *);
int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);

struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd,
	scsi_minphys,
	mpii_scsi_probe,
	NULL,
	mpii_scsi_ioctl
};

struct mpii_dmamem *
		mpii_dmamem_alloc(struct mpii_softc *, size_t);
void		mpii_dmamem_free(struct mpii_softc *,
		    struct mpii_dmamem *);
int		mpii_alloc_ccbs(struct mpii_softc *);
void *		mpii_get_ccb(void *);
void		mpii_put_ccb(void *, void *);
int		mpii_alloc_replies(struct mpii_softc *);
int		mpii_alloc_queues(struct mpii_softc *);
void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
void		mpii_push_replies(struct mpii_softc *);

void		mpii_scsi_cmd_tmo(void *);
void		mpii_scsi_cmd_tmo_handler(void *, void *);
void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);

int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
struct mpii_device *
		mpii_find_dev(struct mpii_softc *, u_int16_t);

void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
void		mpii_poll_done(struct mpii_ccb *);
struct mpii_rcb *
		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);

void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
void		mpii_wait_done(struct mpii_ccb *);

void		mpii_init_queues(struct mpii_softc *);

int		mpii_load_xs(struct mpii_ccb *);
int		mpii_load_xs_sas3(struct mpii_ccb *);

u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
		    u_int32_t);
int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
		    u_int32_t);

int		mpii_init(struct mpii_softc *);
int		mpii_reset_soft(struct mpii_softc *);
int		mpii_reset_hard(struct mpii_softc *);

int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
int		mpii_handshake_recv_dword(struct mpii_softc *,
		    u_int32_t *);
int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);

void		mpii_empty_done(struct mpii_ccb *);

int		mpii_iocinit(struct mpii_softc *);
int		mpii_iocfacts(struct mpii_softc *);
int		mpii_portfacts(struct mpii_softc *);
int		mpii_portenable(struct mpii_softc *);
int		mpii_cfg_coalescing(struct mpii_softc *);
int		mpii_board_info(struct mpii_softc *);
int		mpii_target_map(struct mpii_softc *);

int		mpii_eventnotify(struct mpii_softc *);
void		mpii_eventnotify_done(struct mpii_ccb *);
void		mpii_eventack(void *, void *);
void		mpii_eventack_done(struct mpii_ccb *);
void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
void		mpii_event_sas(void *);
void		mpii_event_raid(struct mpii_softc *,
		    struct mpii_msg_event_reply *);
void		mpii_event_discovery(struct mpii_softc *,
		    struct mpii_msg_event_reply *);

void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);

int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
		    u_int8_t, u_int32_t, int, void *);
int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
		    void *, int, void *, size_t);

int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);

#if NBIO > 0
int		mpii_ioctl(struct device *, u_long, caddr_t);
int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
		    int, int *);
int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
		    u_int8_t);
struct mpii_device *
		mpii_find_vol(struct mpii_softc *, int);
#ifndef SMALL_KERNEL
int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
int		mpii_create_sensors(struct mpii_softc *);
void		mpii_refresh_sensors(void *);
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* shorthands for the doorbell and interrupt status registers */
#define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
#define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
				    == MPII_INTR_STATUS_REPLY)

#define mpii_write_reply_free(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_POST_HOST_INDEX, (v))

#define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_IOC2SYSDB, 0)
#define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_SYS2IOCDB, 0)

/* store a 64-bit dma address into the split lo/hi fields of an SGE */
static inline void
mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
{
	htolem32(&sge->sg_addr_lo, dva);
	htolem32(&sge->sg_addr_hi, dva >> 32);
}

#define MPII_PG_EXTENDED	(1<<0)
#define MPII_PG_POLL		(1<<1)
#define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/* PCI ids of supported SAS2xxx/SAS3xxx controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{
PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_1 }, 432 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_2 }, 433 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_3 }, 434 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3108_4 }, 435 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3408 }, 436 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3416 }, 437 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3508 }, 438 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3508_1 }, 439 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3516 }, 440 { PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_SAS3516_1 } 441 }; 442 443 int 444 mpii_match(struct device *parent, void *match, void *aux) 445 { 446 return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices))); 447 } 448 449 void 450 mpii_attach(struct device *parent, struct device *self, void *aux) 451 { 452 struct mpii_softc *sc = (struct mpii_softc *)self; 453 struct pci_attach_args *pa = aux; 454 pcireg_t memtype; 455 int r; 456 pci_intr_handle_t ih; 457 struct scsibus_attach_args saa; 458 struct mpii_ccb *ccb; 459 460 sc->sc_pc = pa->pa_pc; 461 sc->sc_tag = pa->pa_tag; 462 sc->sc_dmat = pa->pa_dmat; 463 464 mtx_init(&sc->sc_req_mtx, IPL_BIO); 465 mtx_init(&sc->sc_rep_mtx, IPL_BIO); 466 467 /* find the appropriate memory base */ 468 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) { 469 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r); 470 if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM) 471 break; 472 } 473 if (r >= PCI_MAPREG_END) { 474 printf(": unable to locate system interface registers\n"); 475 return; 476 } 477 478 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh, 479 NULL, &sc->sc_ios, 0xFF) != 0) { 480 printf(": unable to map system interface registers\n"); 481 return; 482 } 483 484 /* disable the expansion rom */ 485 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG, 486 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) & 487 ~PCI_ROM_ENABLE); 488 489 /* disable interrupts */ 490 
mpii_write(sc, MPII_INTR_MASK, 491 MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY | 492 MPII_INTR_MASK_DOORBELL); 493 494 /* hook up the interrupt */ 495 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 496 printf(": unable to map interrupt\n"); 497 goto unmap; 498 } 499 printf(": %s\n", pci_intr_string(sc->sc_pc, ih)); 500 501 if (mpii_iocfacts(sc) != 0) { 502 printf("%s: unable to get iocfacts\n", DEVNAME(sc)); 503 goto unmap; 504 } 505 506 if (mpii_init(sc) != 0) { 507 printf("%s: unable to initialize ioc\n", DEVNAME(sc)); 508 goto unmap; 509 } 510 511 if (mpii_alloc_ccbs(sc) != 0) { 512 /* error already printed */ 513 goto unmap; 514 } 515 516 if (mpii_alloc_replies(sc) != 0) { 517 printf("%s: unable to allocated reply space\n", DEVNAME(sc)); 518 goto free_ccbs; 519 } 520 521 if (mpii_alloc_queues(sc) != 0) { 522 printf("%s: unable to allocate reply queues\n", DEVNAME(sc)); 523 goto free_replies; 524 } 525 526 if (mpii_iocinit(sc) != 0) { 527 printf("%s: unable to send iocinit\n", DEVNAME(sc)); 528 goto free_queues; 529 } 530 531 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE, 532 MPII_DOORBELL_STATE_OPER) != 0) { 533 printf("%s: state: 0x%08x\n", DEVNAME(sc), 534 mpii_read_db(sc) & MPII_DOORBELL_STATE); 535 printf("%s: operational state timeout\n", DEVNAME(sc)); 536 goto free_queues; 537 } 538 539 mpii_push_replies(sc); 540 mpii_init_queues(sc); 541 542 if (mpii_board_info(sc) != 0) { 543 printf("%s: unable to get manufacturing page 0\n", 544 DEVNAME(sc)); 545 goto free_queues; 546 } 547 548 if (mpii_portfacts(sc) != 0) { 549 printf("%s: unable to get portfacts\n", DEVNAME(sc)); 550 goto free_queues; 551 } 552 553 if (mpii_target_map(sc) != 0) { 554 printf("%s: unable to setup target mappings\n", DEVNAME(sc)); 555 goto free_queues; 556 } 557 558 if (mpii_cfg_coalescing(sc) != 0) { 559 printf("%s: unable to configure coalescing\n", DEVNAME(sc)); 560 goto free_queues; 561 } 562 563 /* XXX bail on unsupported porttype? 
*/ 564 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) || 565 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) || 566 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) { 567 if (mpii_eventnotify(sc) != 0) { 568 printf("%s: unable to enable events\n", DEVNAME(sc)); 569 goto free_queues; 570 } 571 } 572 573 sc->sc_devs = mallocarray(sc->sc_max_devices, 574 sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO); 575 if (sc->sc_devs == NULL) { 576 printf("%s: unable to allocate memory for mpii_device\n", 577 DEVNAME(sc)); 578 goto free_queues; 579 } 580 581 if (mpii_portenable(sc) != 0) { 582 printf("%s: unable to enable port\n", DEVNAME(sc)); 583 goto free_devs; 584 } 585 586 /* we should be good to go now, attach scsibus */ 587 sc->sc_link.adapter = &mpii_switch; 588 sc->sc_link.adapter_softc = sc; 589 sc->sc_link.adapter_target = -1; 590 sc->sc_link.adapter_buswidth = sc->sc_max_devices; 591 sc->sc_link.luns = 1; 592 sc->sc_link.openings = sc->sc_max_cmds - 1; 593 sc->sc_link.pool = &sc->sc_iopool; 594 595 memset(&saa, 0, sizeof(saa)); 596 saa.saa_sc_link = &sc->sc_link; 597 598 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO, 599 mpii_intr, sc, sc->sc_dev.dv_xname); 600 if (sc->sc_ih == NULL) 601 goto free_devs; 602 603 /* force autoconf to wait for the first sas discovery to complete */ 604 SET(sc->sc_flags, MPII_F_CONFIG_PENDING); 605 config_pending_incr(); 606 607 /* config_found() returns the scsibus attached to us */ 608 sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev, 609 &saa, scsiprint); 610 611 /* enable interrupts */ 612 mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL 613 | MPII_INTR_MASK_RESET); 614 615 #if NBIO > 0 616 if (ISSET(sc->sc_flags, MPII_F_RAID)) { 617 if (bio_register(&sc->sc_dev, mpii_ioctl) != 0) 618 panic("%s: controller registration failed", 619 DEVNAME(sc)); 620 else 621 sc->sc_ioctl = mpii_ioctl; 622 623 #ifndef SMALL_KERNEL 624 if (mpii_create_sensors(sc) != 0) 625 
printf("%s: unable to create sensors\n", DEVNAME(sc)); 626 #endif 627 } 628 #endif 629 630 return; 631 632 free_devs: 633 free(sc->sc_devs, M_DEVBUF, 0); 634 sc->sc_devs = NULL; 635 636 free_queues: 637 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq), 638 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD); 639 mpii_dmamem_free(sc, sc->sc_reply_freeq); 640 641 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq), 642 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD); 643 mpii_dmamem_free(sc, sc->sc_reply_postq); 644 645 free_replies: 646 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies), 647 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD); 648 mpii_dmamem_free(sc, sc->sc_replies); 649 650 free_ccbs: 651 while ((ccb = mpii_get_ccb(sc)) != NULL) 652 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 653 mpii_dmamem_free(sc, sc->sc_requests); 654 free(sc->sc_ccbs, M_DEVBUF, 0); 655 656 unmap: 657 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 658 sc->sc_ios = 0; 659 } 660 661 int 662 mpii_detach(struct device *self, int flags) 663 { 664 struct mpii_softc *sc = (struct mpii_softc *)self; 665 666 if (sc->sc_ih != NULL) { 667 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 668 sc->sc_ih = NULL; 669 } 670 if (sc->sc_ios != 0) { 671 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 672 sc->sc_ios = 0; 673 } 674 675 return (0); 676 } 677 678 int 679 mpii_intr(void *arg) 680 { 681 struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts); 682 struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs); 683 struct mpii_softc *sc = arg; 684 struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp; 685 struct mpii_ccb *ccb; 686 struct mpii_rcb *rcb; 687 int smid; 688 u_int idx; 689 int rv = 0; 690 691 mtx_enter(&sc->sc_rep_mtx); 692 bus_dmamap_sync(sc->sc_dmat, 693 MPII_DMA_MAP(sc->sc_reply_postq), 694 0, sc->sc_reply_post_qdepth * sizeof(*rdp), 695 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 696 697 idx = sc->sc_reply_post_host_index; 
	for (;;) {
		rdp = &postq[idx];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid != 0 identifies the originating ccb; 0 is an event */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events after dropping the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}

/*
 * Build the IEEE (SAS3) scatter/gather list for an xfer, inserting a
 * chain element at sc_chain_sge when the list would overflow the space
 * in the request frame.  Returns 0 on success, 1 on dmamap load failure.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;
			/* address of the next sge */
			csge->sg_addr = htole64(ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			csge->sg_len = htole32((dmap->dm_nsegs - i) *
			    sizeof(*sge));
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Build the MPI2 scatter/gather list for an xfer (64-bit simple SGEs),
 * with the same chaining scheme as mpii_load_xs_sas3 but using the
 * older sg_hdr flag encoding.  Returns 0 on success, 1 on failure.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			csge->sg_hdr = htole32(MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Decide whether a scsi target should attach and, for SAS devices,
 * fetch SAS device page 0 to fill in the wwns and ATAPI quirks.
 * Returns 0 to attach, non-zero to skip the target.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	/* raid volumes attach without a SAS device page */
	if (ISSET(flags, MPII_DF_VOLUME))
		return (0);

	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
		link->quirks |= SDEV_ONLYBIG;
	}

	return (0);
}

/* read a 32-bit controller register, with a read barrier first */
u_int32_t
mpii_read(struct mpii_softc *sc, bus_size_t r)
{
	u_int32_t			rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r,
	    rv);

	return (rv);
}

/* write a 32-bit controller register, followed by a write barrier */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}


/*
 * Poll register r (up to ~15 seconds, 1ms steps) until the masked value
 * equals target.  Returns 0 on success, 1 on timeout.
 */
int
mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int			i;

	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 15000; i++) {
		if ((mpii_read(sc, r) & mask) == target)
			return (0);
		delay(1000);
	}

	return (1);
}

/*
 * Poll register r (up to ~15 seconds, 1ms steps) until the masked value
 * differs from target.  Returns 0 on success, 1 on timeout.
 */
int
mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
    u_int32_t target)
{
	int			i;

	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
	    mask, target);

	for (i = 0; i < 15000; i++) {
		if ((mpii_read(sc, r) & mask) != target)
			return (0);
		delay(1000);
	}

	return (1);
}

/*
 * Drive the IOC to the ready state, resetting it (soft or hard) as
 * needed based on the doorbell state.  Returns 0 once ready, 1 on
 * failure after a bounded number of attempts.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}

/*
 * Request a message-unit reset through the doorbell and wait for the
 * IOC to report ready.  Returns 0 on success, 1 on failure.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}

/*
 * Reset the adapter via the host diagnostic register: unlock it with
 * the magic write sequence, pulse the reset bit, poll until it clears,
 * then relock.  Returns 0 on success, 1 if the unlock failed.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX read the host diagnostic reg until reset adapter bit clears ? */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}

/*
 * Push a request to the IOC a dword at a time through the doorbell
 * handshake protocol.  Returns 0 on success, 1 on any timeout or if
 * the doorbell is busy.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use.
 */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}

/*
 * mpii_handshake_recv_dword: read one dword of a handshake reply.
 * The doorbell yields 16 data bits per read, so each dword takes two
 * interrupt-wait/read/ack cycles.  Returns 0 on success.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}

/*
 * mpii_handshake_recv: pull a reply out of the doorbell.  The first
 * dword carries the reply header, whose msg_length field gives the
 * total reply length in dwords; up to "dwords" of it is stored in buf
 * and any excess is read and discarded.  Returns 0 on success.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}

/*
 * mpii_empty_done: no-op completion callback for ccbs whose submitter
 * polls or waits on the ccb and consumes the reply itself.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}

/*
 * mpii_iocfacts: query the IOC FACTS over the doorbell handshake and
 * derive the driver's operating parameters from the reply: command
 * credit, reply frame/post/free queue depths, request frame size and
 * the scatter-gather layout (SAS3 IEEE sges vs. MPI sges).
 * Returns 0 on success, nonzero on failure.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to what the hardware can actually post */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		/* scale commands/replies back down to fit the queue */
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	/* round the free queue depth up to a multiple of 16 */
	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element.
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;

	return (0);
}

/*
 * mpii_iocinit: send the IOC INIT handshake message programming the
 * request frame size, the reply post/free queue depths and the DMA
 * addresses of the request, reply, post and free queue areas into the
 * controller.  Returns 0 on success, nonzero on failure.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code?
 */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is programmed in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/*
	 * sense buffers live inside the request frames, so the high
	 * 32 bits of their addresses come from the request area.
	 */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}

/*
 * mpii_push_reply: hand a consumed reply frame back to the controller
 * by writing its DMA address into the next reply free queue slot and
 * advancing the reply free host index register.  A NULL rcb is a no-op.
 */
void
mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	u_int32_t		*rfp;
	u_int			idx;

	if (rcb == NULL)
		return;

	idx = sc->sc_reply_free_host_index;

	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	htolem32(&rfp[idx], rcb->rcb_reply_dva);

	/* the free queue index wraps at the queue depth */
	if (++idx >= sc->sc_reply_free_qdepth)
		idx = 0;

	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
}

/*
 * mpii_portfacts: issue a PORT FACTS request by polling and record the
 * reported port type in the softc.  Returns 0 on success, nonzero on
 * failure.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}

/*
 * mpii_eventack: iopool handler that takes one event reply off the
 * ack queue and sends an EVENT ACK for it using the supplied ccb.
 * If more acks are pending, the handler is rescheduled.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct
mpii_msg_event_reply *enp; 1534 struct mpii_msg_eventack_request *eaq; 1535 1536 mtx_enter(&sc->sc_evt_ack_mtx); 1537 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue); 1538 if (rcb != NULL) { 1539 next = SIMPLEQ_NEXT(rcb, rcb_link); 1540 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link); 1541 } 1542 mtx_leave(&sc->sc_evt_ack_mtx); 1543 1544 if (rcb == NULL) { 1545 scsi_io_put(&sc->sc_iopool, ccb); 1546 return; 1547 } 1548 1549 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply; 1550 1551 ccb->ccb_done = mpii_eventack_done; 1552 eaq = ccb->ccb_cmd; 1553 1554 eaq->function = MPII_FUNCTION_EVENT_ACK; 1555 1556 eaq->event = enp->event; 1557 eaq->event_context = enp->event_context; 1558 1559 mpii_push_reply(sc, rcb); 1560 1561 mpii_start(sc, ccb); 1562 1563 if (next != NULL) 1564 scsi_ioh_add(&sc->sc_evt_ack_handler); 1565 } 1566 1567 void 1568 mpii_eventack_done(struct mpii_ccb *ccb) 1569 { 1570 struct mpii_softc *sc = ccb->ccb_sc; 1571 1572 DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc)); 1573 1574 mpii_push_reply(sc, ccb->ccb_rcb); 1575 scsi_io_put(&sc->sc_iopool, ccb); 1576 } 1577 1578 int 1579 mpii_portenable(struct mpii_softc *sc) 1580 { 1581 struct mpii_msg_portenable_request *peq; 1582 struct mpii_ccb *ccb; 1583 1584 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc)); 1585 1586 ccb = scsi_io_get(&sc->sc_iopool, 0); 1587 if (ccb == NULL) { 1588 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n", 1589 DEVNAME(sc)); 1590 return (1); 1591 } 1592 1593 ccb->ccb_done = mpii_empty_done; 1594 peq = ccb->ccb_cmd; 1595 1596 peq->function = MPII_FUNCTION_PORT_ENABLE; 1597 peq->vf_id = sc->sc_vf_id; 1598 1599 if (mpii_poll(sc, ccb) != 0) { 1600 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n", 1601 DEVNAME(sc)); 1602 return (1); 1603 } 1604 1605 if (ccb->ccb_rcb == NULL) { 1606 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n", 1607 DEVNAME(sc)); 1608 return (1); 1609 } 1610 1611 mpii_push_reply(sc, ccb->ccb_rcb); 1612 scsi_io_put(&sc->sc_iopool, 
ccb); 1613 1614 return (0); 1615 } 1616 1617 int 1618 mpii_cfg_coalescing(struct mpii_softc *sc) 1619 { 1620 struct mpii_cfg_hdr hdr; 1621 struct mpii_cfg_ioc_pg1 ipg; 1622 1623 hdr.page_version = 0; 1624 hdr.page_length = sizeof(ipg) / 4; 1625 hdr.page_number = 1; 1626 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC; 1627 memset(&ipg, 0, sizeof(ipg)); 1628 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg, 1629 sizeof(ipg)) != 0) { 1630 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n" 1631 "page 1\n", DEVNAME(sc)); 1632 return (1); 1633 } 1634 1635 if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING)) 1636 return (0); 1637 1638 /* Disable coalescing */ 1639 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING)); 1640 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg, 1641 sizeof(ipg)) != 0) { 1642 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n", 1643 DEVNAME(sc)); 1644 return (1); 1645 } 1646 1647 return (0); 1648 } 1649 1650 #define MPII_EVENT_MASKALL(enq) do { \ 1651 enq->event_masks[0] = 0xffffffff; \ 1652 enq->event_masks[1] = 0xffffffff; \ 1653 enq->event_masks[2] = 0xffffffff; \ 1654 enq->event_masks[3] = 0xffffffff; \ 1655 } while (0) 1656 1657 #define MPII_EVENT_UNMASK(enq, evt) do { \ 1658 enq->event_masks[evt / 32] &= \ 1659 htole32(~(1 << (evt % 32))); \ 1660 } while (0) 1661 1662 int 1663 mpii_eventnotify(struct mpii_softc *sc) 1664 { 1665 struct mpii_msg_event_request *enq; 1666 struct mpii_ccb *ccb; 1667 1668 ccb = scsi_io_get(&sc->sc_iopool, 0); 1669 if (ccb == NULL) { 1670 DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n", 1671 DEVNAME(sc)); 1672 return (1); 1673 } 1674 1675 SIMPLEQ_INIT(&sc->sc_evt_sas_queue); 1676 mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO); 1677 task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc); 1678 1679 SIMPLEQ_INIT(&sc->sc_evt_ack_queue); 1680 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO); 1681 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool, 1682 mpii_eventack, sc); 1683 1684 
	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}

/*
 * mpii_eventnotify_done: completion of the event notification request;
 * release the ccb and hand the event reply to mpii_event_process().
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}

/*
 * mpii_event_raid: handle an IR configuration change list event.
 * Walks the change elements and adds/removes volume devices and marks
 * volume disks and hot spares hidden, keeping sc_vd_count in step.
 * Foreign configurations are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list data follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* the elements immediately follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}

/*
 * mpii_event_sas: task handler that processes one queued sas event.
 * Discovery events go to mpii_event_discovery(); topology change lists
 * are walked here, attaching newly added devices and detaching missing
 * ones.  If more events remain queued the task re-adds itself.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc			*sc = xsc;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_evt_sas_tcl			*tcl;
	struct mpii_evt_phy_entry		*pe;
	struct mpii_device			*dev;
	int					i;
	u_int16_t				handle;

	/* dequeue one event under the sas queue mutex */
	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the topology change list and phy entries follow the header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			/* hidden devices were never attached to scsibus */
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}

/*
 * mpii_event_discovery: handle a sas discovery event.  On discovery
 * completion, report a nonzero discovery status and release the
 * config_pending hold taken during attach, if still held.
 */
void
mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_sas_discovery *esd =
	    (struct mpii_evt_sas_discovery *)(enp + 1);

	if (esd->reason_code == MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED) {
		if (esd->discovery_status != 0) {
			printf("%s: sas discovery completed with status %#x\n",
			    DEVNAME(sc), esd->discovery_status);
		}

		if (ISSET(sc->sc_flags, MPII_F_CONFIG_PENDING)) {
			CLR(sc->sc_flags, MPII_F_CONFIG_PENDING);
			config_pending_decr();
		}
	}
}

/*
 * mpii_event_process: dispatch an event notification reply.  Sas
 * events are queued for the sas task (which then owns the rcb and
 * this returns early); IR events are handled inline.  All other paths
 * fall through to mpii_event_done() to ack or recycle the reply.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    letoh16(enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the sas task; it calls mpii_event_done() */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
	}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct
 mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		/* track resync progress for bio(4) status reporting */
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
	}
	default:
		DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}

/*
 * mpii_event_done: finish with an event reply.  If the controller
 * requires an ack, queue the rcb for the ack handler; otherwise give
 * the reply frame straight back to the chip.
 */
void
mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply	*enp = rcb->rcb_reply;

	if (enp->ack_required) {
		mtx_enter(&sc->sc_evt_ack_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_ack_mtx);
		scsi_ioh_add(&sc->sc_evt_ack_handler);
	} else
		mpii_push_reply(sc, rcb);
}

/*
 * mpii_sas_remove_device: tell the controller a device is gone.
 * First issues a target reset task management request for the handle,
 * then reuses the same ccb to send a SAS IO unit control request with
 * the REMOVE_DEVICE operation.  Best-effort: errors are not reported.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}

/*
 * mpii_board_info: fetch IOC FACTS (for firmware/MPI versions) and
 * manufacturing page 0 (for the board name), then print the attach
 * banner.  Returns 0 on success, nonzero on failure.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	struct mpii_cfg_manufacturing_pg0	mpg;
	struct mpii_cfg_hdr			hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		/* NOTE(review): other error paths here return 1 — confirm */
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ?
 " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}

/*
 * mpii_target_map: read IOC configuration page 8 and work out how
 * volume and physical disk device handles map onto scsibus target ids
 * (sc_vd_id_low / sc_pd_id_start), honouring the reserved-target-0
 * flag and the IR volume mapping mode.  Returns 0 on success.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* target id 0 may be reserved; shift everything up by one */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes first, then physical disks after them */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes occupy the top of the id space */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}

/*
 * mpii_req_cfg_header: fetch a configuration page header via a CONFIG
 * PAGE_HEADER request.  "p" points to a mpii_cfg_hdr, or to a
 * mpii_ecfg_hdr when MPII_PG_EXTENDED is set in flags; MPII_PG_POLL
 * selects polled rather than sleeping completion.  Returns 0 on
 * success, nonzero on failure.
 */
int
mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
    u_int32_t address, int flags, void *p)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	int					etype = 0;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
	    address, flags, MPII_PG_FMT);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	/* extended pages carry the real type in ext_page_type */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		etype = type;
		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;

	cq->config_header.page_number = number;
	cq->config_header.page_type = type;
	cq->ext_page_type = etype;
	htolem32(&cq->page_address, address);
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			/*
			 * NOTE(review): the ccb is not returned to the
			 * iopool on this path; whether it may still be
			 * in flight depends on mpii_poll() — confirm
			 * before changing.
			 */
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
	    cp->sgl_flags, cp->msg_length, cp->function);
	DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (ISSET(flags, MPII_PG_EXTENDED)) {
		memset(ehdr, 0, sizeof(*ehdr));
		ehdr->page_version = cp->config_header.page_version;
		ehdr->page_number = cp->config_header.page_number;
		ehdr->page_type = cp->config_header.page_type;
		ehdr->ext_page_length = cp->ext_page_length;
		ehdr->ext_page_type = cp->ext_page_type;
	} else
		*hdr = cp->config_header;

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}

/*
 * mpii_req_cfg_page: read or write a configuration page previously
 * described by a header from mpii_req_cfg_header().  "p" is that
 * header (extended when MPII_PG_EXTENDED is set); the page data is
 * bounced through the request frame to avoid extra DMA mappings.
 * Returns 0 on success, nonzero on failure.
 */
int
mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
    void *p, int read, void *page, size_t len)
{
	struct mpii_msg_config_request		*cq;
	struct mpii_msg_config_reply		*cp;
	struct mpii_ccb				*ccb;
	struct mpii_cfg_hdr			*hdr = p;
	struct mpii_ecfg_hdr			*ehdr = p;
	caddr_t					kva;
	int					page_length;
	int					rv = 0;

	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);

	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;

	/* the page must fit in the request frame behind the command */
	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
		return (1);

	ccb = scsi_io_get(&sc->sc_iopool,
	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	cq = ccb->ccb_cmd;

	cq->function = MPII_FUNCTION_CONFIG;

	cq->action = (read ?
	    MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);

	/* replay the header fields the firmware gave us earlier */
	if (ISSET(flags, MPII_PG_EXTENDED)) {
		cq->config_header.page_version = ehdr->page_version;
		cq->config_header.page_number = ehdr->page_number;
		cq->config_header.page_type = ehdr->page_type;
		cq->ext_page_len = ehdr->ext_page_length;
		cq->ext_page_type = ehdr->ext_page_type;
	} else
		cq->config_header = *hdr;
	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
	htolem32(&cq->page_address, address);
	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));

	/* bounce the page via the request space to avoid more bus_dma games */
	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
	    sizeof(struct mpii_msg_config_request));

	kva = ccb->ccb_cmd;
	kva += sizeof(struct mpii_msg_config_request);

	if (!read)
		memcpy(kva, page, len);

	ccb->ccb_done = mpii_empty_done;
	if (ISSET(flags, MPII_PG_POLL)) {
		if (mpii_poll(sc, ccb) != 0) {
			/*
			 * NOTE(review): ccb is not returned to the iopool on
			 * this path; unreachable today since mpii_poll()
			 * always returns 0, but worth confirming.
			 */
			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
			    DEVNAME(sc));
			return (1);
		}
	} else
		mpii_wait(sc, ccb);

	/* no reply frame means the request itself failed */
	if (ccb->ccb_rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return (1);
	}
	cp = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
	    cp->function);
	DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
	    "msg_flags: 0x%02x\n", DEVNAME(sc),
	    letoh16(cp->ext_page_length), cp->ext_page_type,
	    cp->msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    cp->vp_id, cp->vf_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    letoh16(cp->ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(cp->ioc_loginfo));
	DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
	    cp->config_header.page_version,
	    cp->config_header.page_length,
	    cp->config_header.page_number,
	    cp->config_header.page_type);

	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
		rv = 1;
	else if (read)
		/* copy the page data out of the bounce area */
		memcpy(page, kva, len);

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}

/*
 * Translate a reply descriptor taken from the post queue into the rcb
 * tracking its reply frame.  Only ADDRESS_REPLY descriptors carry a frame;
 * for those the frame index is recovered from the frame's DMA address.
 * The descriptor is reset to all-ones (the "unused" pattern the post queue
 * is initialized with) before the queue slot is synced back.  Returns the
 * rcb, or NULL if the descriptor carried no reply frame.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* frame index = offset into the reply array / frame size */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot unused again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}

/*
 * Allocate a single physically contiguous DMA buffer of the given size,
 * zeroed and mapped into kernel virtual memory.  All intermediate
 * resources are unwound on failure.  Returns NULL if any step fails.
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	/* one segment: the controller needs contiguous memory */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if
	    (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

	/* unwind in reverse order of acquisition */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}

/*
 * Release a DMA buffer allocated by mpii_dmamem_alloc(): unload, unmap,
 * free the segment, destroy the map, then free the descriptor itself.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}

/*
 * Insert a device into the sc_devs slot table, starting the search at the
 * slot the caller suggested in dev->slot and walking upward to the first
 * free entry.  On success dev->slot is updated to the slot actually used.
 * Returns 0 on success, 1 if dev is invalid or the table is full.
 */
int
mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
{
	int slot;	/* initial hint */

	if (dev == NULL || dev->slot < 0)
		return (1);
	slot = dev->slot;

	/* linear probe upward from the hint for a free slot */
	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
		slot++;

	if (slot >= sc->sc_max_devices)
		return (1);

	dev->slot = slot;
	sc->sc_devs[slot] = dev;

	return (0);
}

/*
 * Remove the device with dev's dev_handle from the slot table.  Matching
 * is by handle, not by pointer.  Returns 0 if found and cleared, 1 if not
 * present (or dev is NULL).  The device structure itself is not freed.
 */
int
mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
{
	int			i;

	if (dev == NULL)
		return (1);

	for (i = 0; i < sc->sc_max_devices; i++) {
		if (sc->sc_devs[i] == NULL)
			continue;

		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
			sc->sc_devs[i] = NULL;
			return (0);
		}
	}

	return (1);
}

/*
 * Look up a device by its firmware dev_handle.  Linear scan of the slot
 * table; returns the device or NULL if no entry carries that handle.
 */
struct mpii_device *
mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
{
	int			i;

	for (i = 0; i < sc->sc_max_devices; i++) {
		if (sc->sc_devs[i] == NULL)
			continue;

		if (sc->sc_devs[i]->dev_handle == handle)
			return (sc->sc_devs[i]);
	}

	return (NULL);
}

/*
 * Allocate the ccb array and the DMA memory holding the request frames,
 * create a per-ccb DMA map for data transfers, and seed the free-ccb
 * iopool.  smid 0 is reserved by the firmware, so sc_max_cmds frames
 * yield (sc_max_cmds - 1) usable ccbs.  Returns 0 on success, 1 on
 * failure with everything unwound.
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		/* smid is stored little-endian, ready for the descriptor */
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* drain the partially filled free list to destroy the maps */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}

/*
 * iopool put routine: reset the ccb's per-command state, clear its request
 * frame, and return it to the free list.  The kernel lock is dropped
 * around the mutex to keep the lock ordering used elsewhere in the driver.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}

/*
 * iopool get routine: take a ccb off the free list, or return NULL if
 * none are available.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct
	mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}

/*
 * Allocate the reply frame tracking array (rcbs) and the DMA memory
 * holding the reply frames themselves.  Returns 0 on success, 1 on
 * failure with the rcb array freed again.
 */
int
mpii_alloc_replies(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));

	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
	if (sc->sc_rcbs == NULL)
		return (1);

	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
	    sc->sc_num_reply_frames);
	if (sc->sc_replies == NULL) {
		free(sc->sc_rcbs, M_DEVBUF,
		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
		return (1);
	}

	return (0);
}

/*
 * Point each rcb at its reply frame (kva and dva) and hand all frames to
 * the controller via the reply free queue.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb		*rcb;
	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
	int			i;

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}

/*
 * Hand a prepared request to the controller: sync the request frame,
 * build a request descriptor for it (type depends on the function in the
 * frame) and post the 64-bit descriptor to the request descriptor
 * registers.  On LP64 this is a single raw 8-byte write; otherwise the
 * two 4-byte halves are written under sc_req_mtx with write barriers so
 * posts from different CPUs cannot interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;	/* already little-endian */

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}

/*
 * Run a ccb to completion by polling the interrupt path instead of
 * sleeping.  The original done handler and cookie are saved, replaced
 * with a completion flag, and restored before the handler runs.
 * NOTE(review): as written this always returns 0 — the loop only exits
 * once mpii_poll_done() has cleared rv.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}

/*
 * Completion handler used by mpii_poll(): clear the caller's wait flag.
 */
void
mpii_poll_done(struct mpii_ccb *ccb)
{
	int				*rv = ccb->ccb_cookie;

	*rv = 0;
}

/*
 * Allocate the reply free queue (seeded with the dva of every reply
 * frame) and the reply post queue (initialized to the all-ones "unused"
 * pattern).  Returns 0 on success, 1 on failure.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t		*rfp;
	int			i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}

/*
 * Tell the controller where the queue indices start: the free queue is
 * fully stocked (index at qdepth - 1), the post queue is empty.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}

/*
 * Run a ccb to completion, sleeping until the interrupt path signals it
 * via mpii_wait_done().  A stack-local mutex doubles as the completion
 * flag carrier: ccb_cookie points at it while the command is in flight
 * and is cleared on completion.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx = MUTEX_INITIALIZER(IPL_BIO);
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to
complete */

	mpii_start(sc, ccb);

	mtx_enter(&mtx);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &mtx, PRIBIO, "mpiiwait", 0);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}

/*
 * Completion handler used by mpii_wait(): clear ccb_cookie under the
 * waiter's mutex and wake the sleeping thread.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}

/*
 * scsi_adapter entry point: translate a scsi_xfer into an MPI SCSI IO
 * request and submit it.  Oversized CDBs are failed with ILLEGAL REQUEST
 * sense; a missing target fails with selection timeout.  Sense data is
 * returned by the firmware into the tail of the request frame.  Polled
 * transfers complete before return; otherwise a timeout is armed and the
 * command is posted asynchronously.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		/* fake ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE */
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* build the SGL; IEEE format on SAS3 controllers */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}

/*
 * Timeout handler for a SCSI command: if the ccb is still queued, move it
 * to the timeout list and schedule the recovery handler, which needs a
 * ccb of its own and therefore runs from the iopool's ioh machinery.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo\n", DEVNAME(sc));

	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}

/*
 * Recovery handler: take one timed-out ccb off the list and issue a
 * target reset for its dev handle using the ccb (tccb) the iopool
 * provided.  If the list is empty the tccb is returned unused.  The
 * handler re-invokes itself from mpii_scsi_cmd_tmo_done() until the
 * list drains.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}

/*
 * Target reset completed: loop back into the handler to process any
 * further timed-out ccbs, reusing the same tccb.
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}

/*
 * Completion handler for SCSI IO: cancel the timeout (pulling the ccb
 * back off the timeout list if it raced onto it), unload the data DMA
 * map, and translate the firmware's IO error reply (if any) into
 * scsi_xfer status/error/sense.  No reply frame at all means clean
 * success.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), letoh16(sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, letoh16(sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    letoh32(sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    letoh32(sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    letoh32(sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
	    letoh16(sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), letoh32(sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		/* transport ok; map the SCSI status byte */
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* the firmware deposited sense data at the end of the request */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}

/*
 * Per-lun ioctl hook: cache control ioctls are handled here for RAID
 * volumes; everything else is passed to the softc's sc_ioctl hook
 * (the bio interface) if one is registered.
 */
int
mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
{
	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device	*dev = sc->sc_devs[link->target];

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));

	switch (cmd) {
	case DIOCGCACHE:
	case DIOCSCACHE:
		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
			return (mpii_ioctl_cache(link, cmd,
			    (struct dk_cache *)addr));
		}
		break;

	default:
		if (sc->sc_ioctl)
			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));

		break;
	}

	return (ENOTTY);
}

/*
 * Get or set the write cache state of a RAID volume.  The current state
 * is read from RAID volume page 0; a change is requested with a RAID
 * action and the reply is checked to confirm the new setting took.
 * Returns 0 on success or an errno.
 */
int
mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
{
	struct mpii_softc	*sc = (struct mpii_softc *)link->adapter_softc;
	struct mpii_device	*dev = sc->sc_devs[link->target];
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_msg_raid_action_request *req;
	struct mpii_msg_raid_action_reply *rep;
	struct mpii_cfg_hdr	hdr;
	struct mpii_ccb		*ccb;
	u_int32_t		addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
	size_t			pagelen;
	int			rv = 0;
	int			enabled;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    addr, MPII_PG_POLL, &hdr) != 0)
		return (EINVAL);

	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL)
		return (ENOMEM);

	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
	    vpg, pagelen) != 0) {
		rv = EINVAL;
		goto done;
	}

	enabled = ((lemtoh16(&vpg->volume_settings) &
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;

	if (cmd == DIOCGCACHE) {
		dc->wrcache = enabled;
		dc->rdcache = 0;
		goto done;
	} /* else DIOCSCACHE */

	/* read cache control is not supported by the firmware */
	if (dc->rdcache) {
		rv = EOPNOTSUPP;
		goto done;
	}

	/* already in the requested state: nothing to do */
	if (((dc->wrcache) ? 1 : 0) == enabled)
		goto done;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
	if (ccb == NULL) {
		rv = ENOMEM;
		goto done;
	}

	ccb->ccb_done = mpii_empty_done;

	req = ccb->ccb_cmd;
	memset(req, 0, sizeof(*req));
	req->function = MPII_FUNCTION_RAID_ACTION;
	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
	htolem16(&req->vol_dev_handle, dev->dev_handle);
	htolem32(&req->action_data, dc->wrcache ?
3232 MPII_RAID_VOL_WRITE_CACHE_ENABLE : 3233 MPII_RAID_VOL_WRITE_CACHE_DISABLE); 3234 3235 if (mpii_poll(sc, ccb) != 0) { 3236 rv = EIO; 3237 goto done; 3238 } 3239 3240 if (ccb->ccb_rcb != NULL) { 3241 rep = ccb->ccb_rcb->rcb_reply; 3242 if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) || 3243 ((rep->action_data[0] & 3244 MPII_RAID_VOL_WRITE_CACHE_MASK) != 3245 (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE : 3246 MPII_RAID_VOL_WRITE_CACHE_DISABLE))) 3247 rv = EINVAL; 3248 mpii_push_reply(sc, ccb->ccb_rcb); 3249 } 3250 3251 scsi_io_put(&sc->sc_iopool, ccb); 3252 3253 done: 3254 free(vpg, M_TEMP, pagelen); 3255 return (rv); 3256 } 3257 3258 #if NBIO > 0 3259 int 3260 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr) 3261 { 3262 struct mpii_softc *sc = (struct mpii_softc *)dev; 3263 int error = 0; 3264 3265 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc)); 3266 3267 switch (cmd) { 3268 case BIOCINQ: 3269 DNPRINTF(MPII_D_IOCTL, "inq\n"); 3270 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr); 3271 break; 3272 case BIOCVOL: 3273 DNPRINTF(MPII_D_IOCTL, "vol\n"); 3274 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr); 3275 break; 3276 case BIOCDISK: 3277 DNPRINTF(MPII_D_IOCTL, "disk\n"); 3278 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr); 3279 break; 3280 default: 3281 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n"); 3282 error = ENOTTY; 3283 } 3284 3285 return (error); 3286 } 3287 3288 int 3289 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi) 3290 { 3291 int i; 3292 3293 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc)); 3294 3295 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 3296 for (i = 0; i < sc->sc_max_devices; i++) 3297 if (sc->sc_devs[i] && 3298 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME)) 3299 bi->bi_novol++; 3300 return (0); 3301 } 3302 3303 int 3304 mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv) 3305 { 3306 struct mpii_cfg_raid_vol_pg0 *vpg; 3307 struct mpii_cfg_hdr hdr; 3308 struct mpii_device 
*dev;
	struct scsi_link		*lnk;
	struct device			*scdev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/* Fetch the header first to learn the size of RAID volume page 0. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 4-byte units */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Map the firmware volume state onto bio(4) status values. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* Rebuild progress is tracked in the device table entry. */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Translate the volume type into a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Hot spares assigned to this volume's pool count as member disks. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* Report the name of the attached scsi device, if any. */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}

/*
 * BIOCDISK: fill in the details of a single disk of the volume selected
 * by bd->bd_volid.  Disk ids beyond the volume's physical disks refer to
 * hot spares and are resolved by mpii_bio_hs().
 */
int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0		*vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	size_t					pagelen;
	u_int16_t				volh;
	u_int8_t				dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 4-byte units */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}
	/*
	 * Disk ids past the volume's physical disks refer to hot spares;
	 * hand those off to mpii_bio_hs().
	 */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int nvdsk = vpg->num_phys_disks;
		int hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP, pagelen);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* The physical disk entries follow the fixed part of the page. */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP, pagelen);
	return (mpii_bio_disk(sc, bd, dn));
}

/*
 * Walk the active RAID configuration page looking for hot spare disks
 * assigned to the hot spare pool "hsmap".  When "bd" is given, fill it in
 * for the hot spare matching bd->bd_diskid; otherwise just count the
 * spares and return the total via "hscnt".
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
    int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 4-byte units */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* The config elements follow the fixed part of the page. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}

/*
 * Fill in a bioc_disk structure from RAID physical disk page 0 for the
 * physical disk number "dn".
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* Hand-rolled header; the page is addressed by disk number. */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* A disk no longer present in the device table is invalid. */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* Map the firmware disk state onto bio(4) status values. */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* Build "vendor product" in bd_vendor, separated by a space. */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}

/*
 * Translate a bio(4) volume id into the corresponding device table entry.
 * Volumes occupy the range starting at sc_vd_id_low.  Returns NULL when
 * the id is out of range or the entry is not a RAID volume.
 */
struct mpii_device *
mpii_find_vol(struct mpii_softc *sc, int volid)
{
	struct mpii_device	*dev = NULL;

	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
		return (NULL);
	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
		return (dev);
	return (NULL);
}

#ifndef SMALL_KERNEL
/*
 * Non-sleeping lightweight version of the mpii_ioctl_vol: only derives
 * the volume status, using MPII_PG_POLL config requests and an M_NOWAIT
 * allocation instead of sleeping ones.
 */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 4-byte units */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/*
	 * Same firmware-state-to-bio(4) mapping as mpii_ioctl_vol(),
	 * minus the rebuild percentage.
	 */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}

/*
 * Attach one drive sensor per RAID volume and register the periodic
 * refresh task.  Returns 0 on success, 1 on failure.
 */
int
mpii_create_sensors(struct mpii_softc *sc)
{
	struct scsibus_softc	*ssc = sc->sc_scsibus;
	struct device		*dev;
	struct scsi_link	*link;
	int			i;

	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL)
		return (1);
	sc->sc_nsensors = sc->sc_vd_count;

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	for (i = 0; i < sc->sc_vd_count; i++) {
		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
		if (link == NULL)
			goto bad;

		dev = link->device_softc;

		sc->sc_sensors[i].type = SENSOR_DRIVE;
		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;

		/* Name the sensor after the attached scsi device. */
		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[i].desc));

		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
	}

	/* Refresh every 10 seconds. */
	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
		goto bad;

	sensordev_install(&sc->sc_sensordev);

	return (0);

bad:
	free(sc->sc_sensors, M_DEVBUF, 0);

	return (1);
}

/*
 * Periodic sensor task: update every volume sensor from the current
 * bio(4) volume status.
 */
void
mpii_refresh_sensors(void *arg)
{
	struct mpii_softc	*sc = arg;
	struct bioc_vol		bv;
	int			i;

	for (i = 0; i < sc->sc_nsensors; i++) {
		memset(&bv, 0, sizeof(bv));
		bv.bv_volid = i;
		/* Stop at the first volume that cannot be queried. */
		if (mpii_bio_volstate(sc, &bv))
			return;
		switch(bv.bv_status) {
		case BIOC_SVOFFLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
			sc->sc_sensors[i].status = SENSOR_S_CRIT;
			break;
		case BIOC_SVDEGRADED:
			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;
		case BIOC_SVREBUILD:
			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
			sc->sc_sensors[i].status = SENSOR_S_WARN;
			break;
		case BIOC_SVONLINE:
			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
			sc->sc_sensors[i].status = SENSOR_S_OK;
			break;
		case BIOC_SVINVALID:
			/* FALLTHROUGH */
		default:
			sc->sc_sensors[i].value = 0; /* unknown */
			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		}
	}
}
#endif /* SMALL_KERNEL */
#endif /* NBIO > 0 */