/*	$OpenBSD: mpi.c,v 1.66 2006/09/16 07:50:46 dlg Exp $ */

/*
 * Copyright (c) 2005, 2006 David Gwynne <dlg@openbsd.org>
 * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <machine/bus.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>

#include <dev/ic/mpireg.h>
#include <dev/ic/mpivar.h>

#ifdef MPI_DEBUG
uint32_t		mpi_debug = 0
/*			| MPI_D_CMD */
/*			| MPI_D_INTR */
/*			| MPI_D_MISC */
/*			| MPI_D_DMA */
/*			| MPI_D_IOCTL */
/*			| MPI_D_RW */
/*			| MPI_D_MEM */
/*			| MPI_D_CCB */
/*			| MPI_D_PPR */
/*			| MPI_D_RAID */
		    ;
#endif

struct cfdriver mpi_cd = {
	NULL, "mpi", DV_DULL
};

int			mpi_scsi_cmd(struct scsi_xfer *);
void			mpi_scsi_cmd_done(struct mpi_ccb *);
void			mpi_minphys(struct buf *bp);
int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
			    int, struct proc *);

struct scsi_adapter mpi_switch = {
	mpi_scsi_cmd, mpi_minphys, NULL, NULL, mpi_scsi_ioctl
};

struct scsi_device mpi_dev = {
	NULL, NULL, NULL, NULL
};

struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
void			mpi_dmamem_free(struct mpi_softc *,
			    struct mpi_dmamem *);
int			mpi_alloc_ccbs(struct mpi_softc *);
struct mpi_ccb		*mpi_get_ccb(struct mpi_softc *);
void			mpi_put_ccb(struct mpi_softc *, struct mpi_ccb *);
int			mpi_alloc_replies(struct mpi_softc *);
void			mpi_push_replies(struct mpi_softc *);

void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
int			mpi_complete(struct mpi_softc *, struct mpi_ccb *, int);
int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);

void			mpi_fc_print(struct mpi_softc *);
void			mpi_squash_ppr(struct mpi_softc *);
void			mpi_run_ppr(struct mpi_softc *);
int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
			    struct mpi_cfg_raid_physdisk *, int, int, int);
int			mpi_inq(struct mpi_softc *, u_int16_t, int);

void			mpi_timeout_xs(void *);
int			mpi_load_xs(struct mpi_ccb *);

u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);
int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
			    u_int32_t);

int			mpi_init(struct mpi_softc *);
int			mpi_reset_soft(struct mpi_softc *);
int			mpi_reset_hard(struct mpi_softc *);

int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
int			mpi_handshake_recv_dword(struct mpi_softc *,
			    u_int32_t *);
int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);

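/*
 * The handshake routines above drive the slow doorbell-based message
 * protocol: the request is pushed through the doorbell register one dword
 * at a time and the reply is read back 16 bits at a time (see the comments
 * in mpi_handshake_send() and mpi_handshake_recv_dword() below). In this
 * driver that path is only used for the early IOC FACTS and IOC INIT
 * messages, before the request/reply queues are usable.
 */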
void			mpi_empty_done(struct mpi_ccb *);

int			mpi_iocinit(struct mpi_softc *);
int			mpi_iocfacts(struct mpi_softc *);
int			mpi_portfacts(struct mpi_softc *);
int			mpi_eventnotify(struct mpi_softc *);
void			mpi_eventnotify_done(struct mpi_ccb *);
int			mpi_portenable(struct mpi_softc *);
void			mpi_get_raid(struct mpi_softc *);
int			mpi_fwupload(struct mpi_softc *);

int			mpi_cfg_header(struct mpi_softc *, u_int8_t, u_int8_t,
			    u_int32_t, struct mpi_cfg_hdr *);
int			mpi_cfg_page(struct mpi_softc *, u_int32_t,
			    struct mpi_cfg_hdr *, int, void *, size_t);

#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
#define sizeofa(s)		(sizeof(s) / sizeof((s)[0]))

#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	mpi_read((s), MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	mpi_read((s), MPI_REPLY_QUEUE)
#define mpi_push_reply(s, v)	mpi_write((s), MPI_REPLY_QUEUE, (v))

#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

int
mpi_attach(struct mpi_softc *sc)
{
	struct device			*dev;
	struct mpi_ccb			*ccb;

	printf("\n");

	/* disable interrupts */
	mpi_write(sc, MPI_INTR_MASK,
	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);

	if (mpi_init(sc) != 0) {
		printf("%s: unable to initialise\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_iocfacts(sc) != 0) {
		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
		return (1);
	}

	if (mpi_alloc_ccbs(sc) != 0) {
		/* error already printed */
		return (1);
	}

	if (mpi_alloc_replies(sc) != 0) {
		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
		goto free_ccbs;
	}

	if (mpi_iocinit(sc) != 0) {
		printf("%s: unable to send iocinit\n", DEVNAME(sc));
		goto free_ccbs;
	}

	/* spin until we're operational */
	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
	    MPI_DOORBELL_STATE_OPER) != 0) {
		printf("%s: state: 0x%08x\n", DEVNAME(sc),
		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
		printf("%s: operational state timeout\n", DEVNAME(sc));
		goto free_ccbs;
	}

	mpi_push_replies(sc);

	if (mpi_portfacts(sc) != 0) {
		printf("%s: unable to get portfacts\n", DEVNAME(sc));
		goto free_replies;
	}

#if notyet
	if (mpi_eventnotify(sc) != 0) {
		printf("%s: unable to get eventnotify\n", DEVNAME(sc));
		goto free_replies;
	}
#endif

	if (mpi_portenable(sc) != 0) {
		printf("%s: unable to enable port\n", DEVNAME(sc));
		goto free_replies;
	}

	if (mpi_fwupload(sc) != 0) {
		printf("%s: unable to upload firmware\n", DEVNAME(sc));
		goto free_replies;
	}

	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
		mpi_squash_ppr(sc);

	/* we should be good to go now, attach scsibus */
	sc->sc_link.device = &mpi_dev;
	sc->sc_link.adapter = &mpi_switch;
	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter_target = sc->sc_target;
	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
	sc->sc_link.openings = sc->sc_maxcmds / sc->sc_buswidth;

	config_found(&sc->sc_dev, &sc->sc_link, scsiprint);

	/* find our scsibus */
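	/*
	 * config_found() above attached the scsibus child; walk alldevs to
	 * recover a pointer to it so the domain validation code can reach
	 * the per-target scsi_link structures through sc->sc_scsibus.
	 */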
TAILQ_FOREACH(dev, &alldevs, dv_list) { 226 if (dev->dv_parent == &sc->sc_dev) 227 break; 228 } 229 sc->sc_scsibus = (struct scsibus_softc *)dev; 230 231 /* get raid pages */ 232 mpi_get_raid(sc); 233 234 /* do domain validation */ 235 if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI) 236 mpi_run_ppr(sc); 237 if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC) 238 mpi_fc_print(sc); 239 240 /* enable interrupts */ 241 mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL); 242 243 return (0); 244 245 free_replies: 246 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 247 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD); 248 mpi_dmamem_free(sc, sc->sc_replies); 249 free_ccbs: 250 while ((ccb = mpi_get_ccb(sc)) != NULL) 251 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 252 mpi_dmamem_free(sc, sc->sc_requests); 253 free(sc->sc_ccbs, M_DEVBUF); 254 255 return(1); 256 } 257 258 void 259 mpi_fc_print(struct mpi_softc *sc) 260 { 261 struct mpi_cfg_hdr hdr; 262 struct mpi_cfg_fc_port_pg0 pg; 263 struct mpi_cfg_fc_device_pg0 dpg; 264 struct device *dev; 265 struct scsibus_softc *ssc; 266 struct scsi_link *link; 267 int i; 268 u_int32_t btid; 269 270 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0, 271 &hdr) != 0) { 272 DNPRINTF(MPI_D_MISC, "%s: mpi_fc_print unable to fetch " 273 "FC port header 0\n", DEVNAME(sc)); 274 return; 275 } 276 277 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) { 278 DNPRINTF(MPI_D_MISC, "%s: mpi_fc_print unable to fetch " 279 "FC port page 0\n", 280 DEVNAME(sc)); 281 return; 282 } 283 284 DNPRINTF(MPI_D_MISC, "%s: at: %dGHz WWNN: %016llx WWPN: %016llx\n", 285 DEVNAME(sc), letoh32(pg.current_speed), letoh64(pg.wwnn), 286 letoh64(pg.wwpn)); 287 288 TAILQ_FOREACH(dev, &alldevs, dv_list) { 289 if (dev->dv_parent == &sc->sc_dev) 290 break; 291 } 292 293 /* im too nice to punish idiots who don't configure scsibus */ 294 if (dev == NULL) 295 return; 296 297 ssc = (struct scsibus_softc *)dev; 298 for (i = 0; i < sc->sc_link.adapter_buswidth; i++) { 299 300 link = ssc->sc_link[i][0]; 301 302 if (link == NULL) 303 continue; 304 305 btid = i | MPI_PAGE_ADDRESS_FC_BTID; 306 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0, 307 btid, &hdr) != 0) { 308 DNPRINTF(MPI_D_MISC, "%s: mpi_fc_print unable to fetch " 309 "device header 0\n", DEVNAME(sc)); 310 return; 311 } 312 313 bzero(&dpg, sizeof(dpg)); 314 if (mpi_cfg_page(sc, btid, &hdr, 1, &dpg, sizeof(dpg)) != 0) { 315 DNPRINTF(MPI_D_MISC, "%s: mpi_fc_print unable to fetch " 316 "device page 0\n", DEVNAME(sc)); 317 continue; 318 } 319 320 link->port_wwn = letoh64(dpg.wwpn); 321 link->node_wwn = letoh64(dpg.wwnn); 322 323 DNPRINTF(MPI_D_MISC, "%s: target %d WWNN: %016llx " 324 "WWPN: %016llx\n", DEVNAME(sc), i, 325 letoh64(dpg.wwnn), letoh64(dpg.wwpn)); 326 } 327 } 328 329 void 330 mpi_squash_ppr(struct mpi_softc *sc) 331 { 332 struct mpi_cfg_hdr hdr; 333 struct mpi_cfg_spi_dev_pg1 page; 334 int i; 335 336 DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc)); 337 338 for (i = 0; i < sc->sc_buswidth; i++) { 339 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 340 1, i, &hdr) != 0) 341 return; 342 343 if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0) 344 return; 345 346 DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x " 347 "req_offset: 0x%02x req_period: 0x%02x " 348 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i, 349 page.req_params1, page.req_offset, page.req_period, 350 page.req_params2, letoh32(page.configuration)); 351 352 page.req_params1 = 0x0; 353 page.req_offset = 
0x0; 354 page.req_period = 0x0; 355 page.req_params2 = 0x0; 356 page.configuration = htole32(0x0); 357 358 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0) 359 return; 360 } 361 } 362 363 void 364 mpi_run_ppr(struct mpi_softc *sc) 365 { 366 struct mpi_cfg_hdr hdr; 367 struct mpi_cfg_spi_port_pg0 port_pg; 368 struct mpi_cfg_ioc_pg3 *physdisk_pg; 369 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk; 370 size_t pagelen; 371 struct scsi_link *link; 372 int i, tries; 373 374 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0, 375 &hdr) != 0) { 376 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n", 377 DEVNAME(sc)); 378 return; 379 } 380 381 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) { 382 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n", 383 DEVNAME(sc)); 384 return; 385 } 386 387 for (i = 0; i < sc->sc_buswidth; i++) { 388 link = sc->sc_scsibus->sc_link[i][0]; 389 if (link == NULL) 390 continue; 391 392 /* do not ppr volumes */ 393 if (link->flags & SDEV_VIRTUAL) 394 continue; 395 396 tries = 0; 397 while (mpi_ppr(sc, link, NULL, port_pg.min_period, 398 port_pg.max_offset, tries) == EAGAIN) 399 tries++; 400 } 401 402 if ((sc->sc_flags & MPI_F_RAID) == 0) 403 return; 404 405 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0, 406 &hdr) != 0) { 407 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 408 "fetch ioc pg 3 header\n", DEVNAME(sc)); 409 return; 410 } 411 412 pagelen = hdr.page_length * 4; /* dwords to bytes */ 413 physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK); 414 if (physdisk_pg == NULL) { 415 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to " 416 "allocate ioc pg 3\n", DEVNAME(sc)); 417 return; 418 } 419 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1); 420 421 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) { 422 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to " 423 "fetch ioc page 3\n", DEVNAME(sc)); 424 goto out; 425 } 426 427 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc), 428 physdisk_pg->no_phys_disks); 429 430 for (i = 0; i < physdisk_pg->no_phys_disks; i++) { 431 physdisk = &physdisk_list[i]; 432 433 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d " 434 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id, 435 physdisk->phys_disk_bus, physdisk->phys_disk_ioc, 436 physdisk->phys_disk_num); 437 438 if (physdisk->phys_disk_ioc != sc->sc_ioc_number) 439 continue; 440 441 tries = 0; 442 while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period, 443 port_pg.max_offset, tries) == EAGAIN) 444 tries++; 445 } 446 447 out: 448 free(physdisk_pg, M_TEMP); 449 } 450 451 int 452 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link, 453 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try) 454 { 455 struct mpi_cfg_hdr hdr0, hdr1; 456 struct mpi_cfg_spi_dev_pg0 pg0; 457 struct mpi_cfg_spi_dev_pg1 pg1; 458 u_int32_t address; 459 int id; 460 int raid = 0; 461 462 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d " 463 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try, 464 link->quirks); 465 466 if (try >= 3) 467 return (EIO); 468 469 if (physdisk == NULL) { 470 if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR) 471 return (EIO); 472 473 address = link->target; 474 id = link->target; 475 } else { 476 raid = 1; 477 address = (physdisk->phys_disk_bus << 8) | 478 (physdisk->phys_disk_id); 479 id = physdisk->phys_disk_num; 480 } 481 482 if (mpi_cfg_header(sc, 
MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0, 483 address, &hdr0) != 0) { 484 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n", 485 DEVNAME(sc)); 486 return (EIO); 487 } 488 489 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1, 490 address, &hdr1) != 0) { 491 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n", 492 DEVNAME(sc)); 493 return (EIO); 494 } 495 496 #ifdef MPI_DEBUG 497 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 498 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n", 499 DEVNAME(sc)); 500 return (EIO); 501 } 502 503 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 504 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 505 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset, 506 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 507 #endif 508 509 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 510 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n", 511 DEVNAME(sc)); 512 return (EIO); 513 } 514 515 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 516 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 517 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 518 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 519 520 pg1.req_params1 = 0; 521 pg1.req_offset = offset; 522 pg1.req_period = period; 523 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH; 524 525 if (raid || !(link->quirks & SDEV_NOSYNC)) { 526 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE; 527 528 switch (try) { 529 case 0: /* U320 */ 530 break; 531 case 1: /* U160 */ 532 pg1.req_period = 0x09; 533 break; 534 case 2: /* U80 */ 535 pg1.req_period = 0x0a; 536 break; 537 } 538 539 if (pg1.req_period < 0x09) { 540 /* Ultra320: enable QAS & PACKETIZED */ 541 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS | 542 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED; 543 } 544 if (pg1.req_period < 0xa) { 545 /* >= Ultra160: enable dual xfers */ 546 pg1.req_params1 |= 547 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS; 548 } 549 } 550 551 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 552 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 553 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 554 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 555 556 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) { 557 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n", 558 DEVNAME(sc)); 559 return (EIO); 560 } 561 562 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) { 563 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n", 564 DEVNAME(sc)); 565 return (EIO); 566 } 567 568 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x " 569 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x " 570 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset, 571 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration)); 572 573 if (mpi_inq(sc, id, raid) != 0) { 574 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against " 575 "target %d\n", DEVNAME(sc), link->target); 576 return (EIO); 577 } 578 579 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) { 580 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after " 581 "inquiry\n", DEVNAME(sc)); 582 return (EIO); 583 } 584 585 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x " 586 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x " 587 "info: 0x%08x\n", DEVNAME(sc), 
pg0.neg_params1, pg0.neg_offset, 588 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information)); 589 590 if (!(letoh32(pg0.information) & 0x07) && (try == 0)) { 591 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n", 592 DEVNAME(sc)); 593 return (EAGAIN); 594 } 595 596 if ((((letoh32(pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) { 597 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n", 598 DEVNAME(sc)); 599 return (EAGAIN); 600 } 601 602 if (letoh32(pg0.information) & 0x0e) { 603 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n", 604 DEVNAME(sc), letoh32(pg0.information)); 605 return (EAGAIN); 606 } 607 608 switch(pg0.neg_period) { 609 case 0x08: 610 period = 160; 611 break; 612 case 0x09: 613 period = 80; 614 break; 615 case 0x0a: 616 period = 40; 617 break; 618 case 0x0b: 619 period = 20; 620 break; 621 case 0x0c: 622 period = 10; 623 break; 624 default: 625 period = 0; 626 break; 627 } 628 629 printf("%s: %s %d %s at %dMHz width %dbit offset %d " 630 "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target", 631 id, period ? "Sync" : "Async", period, 632 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8, 633 pg0.neg_offset, 634 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0, 635 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0, 636 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0); 637 638 return (0); 639 } 640 641 int 642 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk) 643 { 644 struct mpi_ccb *ccb; 645 struct scsi_inquiry inq; 646 struct { 647 struct mpi_msg_scsi_io io; 648 struct mpi_sge sge; 649 struct scsi_inquiry_data inqbuf; 650 struct scsi_sense_data sense; 651 } __packed *bundle; 652 struct mpi_msg_scsi_io *io; 653 struct mpi_sge *sge; 654 u_int64_t addr; 655 656 DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc)); 657 658 bzero(&inq, sizeof(inq)); 659 inq.opcode = INQUIRY; 660 661 ccb = mpi_get_ccb(sc); 662 if (ccb == NULL) 663 return (1); 664 665 ccb->ccb_done = mpi_empty_done; 666 667 bundle = ccb->ccb_cmd; 668 io = &bundle->io; 669 sge = &bundle->sge; 670 671 io->function = physdisk ? 
MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH : 672 MPI_FUNCTION_SCSI_IO_REQUEST; 673 /* 674 * bus is always 0 675 * io->bus = htole16(sc->sc_bus); 676 */ 677 io->target_id = target; 678 679 io->cdb_length = sizeof(inq); 680 io->sense_buf_len = sizeof(struct scsi_sense_data); 681 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64; 682 683 io->msg_context = htole32(ccb->ccb_id); 684 685 /* 686 * always lun 0 687 * io->lun[0] = htobe16(link->lun); 688 */ 689 690 io->direction = MPI_SCSIIO_DIR_READ; 691 io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT; 692 693 bcopy(&inq, io->cdb, sizeof(inq)); 694 695 io->data_length = htole32(sizeof(struct scsi_inquiry_data)); 696 697 io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva + 698 ((u_int8_t *)&bundle->sense - (u_int8_t *)bundle)); 699 700 sge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 | 701 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 702 (u_int32_t)sizeof(inq)); 703 704 addr = ccb->ccb_cmd_dva + 705 ((u_int8_t *)&bundle->inqbuf - (u_int8_t *)bundle); 706 sge->sg_hi_addr = htole32((u_int32_t)(addr >> 32)); 707 sge->sg_lo_addr = htole32((u_int32_t)addr); 708 709 if (mpi_poll(sc, ccb, 5000) != 0) 710 return (1); 711 712 if (ccb->ccb_reply != NULL) 713 mpi_push_reply(sc, ccb->ccb_reply_dva); 714 715 mpi_put_ccb(sc, ccb); 716 717 return (0); 718 } 719 720 void 721 mpi_detach(struct mpi_softc *sc) 722 { 723 724 } 725 726 int 727 mpi_intr(void *arg) 728 { 729 struct mpi_softc *sc = arg; 730 struct mpi_ccb *ccb; 731 struct mpi_msg_reply *reply; 732 u_int32_t reply_dva; 733 char *reply_addr; 734 u_int32_t reg, id; 735 int rv = 0; 736 737 while ((reg = mpi_pop_reply(sc)) != 0xffffffff) { 738 739 DNPRINTF(MPI_D_INTR, "%s: mpi_intr reply_queue: 0x%08x\n", 740 DEVNAME(sc), reg); 741 742 if (reg & MPI_REPLY_QUEUE_ADDRESS) { 743 bus_dmamap_sync(sc->sc_dmat, 744 MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE, 745 BUS_DMASYNC_POSTREAD); 746 747 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1; 748 749 reply_addr = MPI_DMA_KVA(sc->sc_replies); 750 reply_addr += reply_dva - 751 (u_int32_t)MPI_DMA_DVA(sc->sc_replies); 752 reply = (struct mpi_msg_reply *)reply_addr; 753 754 id = letoh32(reply->msg_context); 755 756 bus_dmamap_sync(sc->sc_dmat, 757 MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE, 758 BUS_DMASYNC_PREREAD); 759 } else { 760 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) { 761 case MPI_REPLY_QUEUE_TYPE_INIT: 762 id = reg & MPI_REPLY_QUEUE_CONTEXT; 763 break; 764 765 default: 766 panic("%s: unsupported context reply\n", 767 DEVNAME(sc)); 768 } 769 770 reply = NULL; 771 } 772 773 DNPRINTF(MPI_D_INTR, "%s: mpi_intr id: %d reply: %p\n", 774 DEVNAME(sc), id, reply); 775 776 ccb = &sc->sc_ccbs[id]; 777 778 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 779 ccb->ccb_offset, MPI_REQUEST_SIZE, 780 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 781 ccb->ccb_state = MPI_CCB_READY; 782 ccb->ccb_reply = reply; 783 ccb->ccb_reply_dva = reply_dva; 784 785 ccb->ccb_done(ccb); 786 rv = 1; 787 } 788 789 return (rv); 790 } 791 792 struct mpi_dmamem * 793 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size) 794 { 795 struct mpi_dmamem *mdm; 796 int nsegs; 797 798 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT); 799 if (mdm == NULL) 800 return (NULL); 801 802 bzero(mdm, sizeof(struct mpi_dmamem)); 803 mdm->mdm_size = size; 804 805 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 806 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0) 807 goto mdmfree; 808 809 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg, 810 1, &nsegs, 
BUS_DMA_NOWAIT) != 0) 811 goto destroy; 812 813 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size, 814 &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0) 815 goto free; 816 817 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size, 818 NULL, BUS_DMA_NOWAIT) != 0) 819 goto unmap; 820 821 bzero(mdm->mdm_kva, size); 822 823 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x " 824 "map: %#x nsegs: %d segs: %#x kva: %x\n", 825 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva); 826 827 return (mdm); 828 829 unmap: 830 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size); 831 free: 832 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 833 destroy: 834 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 835 mdmfree: 836 free(mdm, M_DEVBUF); 837 838 return (NULL); 839 } 840 841 void 842 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm) 843 { 844 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm); 845 846 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map); 847 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size); 848 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1); 849 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map); 850 free(mdm, M_DEVBUF); 851 } 852 853 int 854 mpi_alloc_ccbs(struct mpi_softc *sc) 855 { 856 struct mpi_ccb *ccb; 857 u_int8_t *cmd; 858 int i; 859 860 TAILQ_INIT(&sc->sc_ccb_free); 861 862 sc->sc_ccbs = malloc(sizeof(struct mpi_ccb) * sc->sc_maxcmds, 863 M_DEVBUF, M_WAITOK); 864 if (sc->sc_ccbs == NULL) { 865 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)); 866 return (1); 867 } 868 bzero(sc->sc_ccbs, sizeof(struct mpi_ccb) * sc->sc_maxcmds); 869 870 sc->sc_requests = mpi_dmamem_alloc(sc, 871 MPI_REQUEST_SIZE * sc->sc_maxcmds); 872 if (sc->sc_requests == NULL) { 873 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)); 874 goto free_ccbs; 875 } 876 cmd = MPI_DMA_KVA(sc->sc_requests); 877 bzero(cmd, MPI_REQUEST_SIZE * sc->sc_maxcmds); 878 879 for (i = 0; i < sc->sc_maxcmds; i++) { 880 ccb = &sc->sc_ccbs[i]; 881 882 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, 883 sc->sc_max_sgl_len, MAXPHYS, 0, 0, 884 &ccb->ccb_dmamap) != 0) { 885 printf("%s: unable to create dma map\n", DEVNAME(sc)); 886 goto free_maps; 887 } 888 889 ccb->ccb_sc = sc; 890 ccb->ccb_id = i; 891 ccb->ccb_offset = MPI_REQUEST_SIZE * i; 892 893 ccb->ccb_cmd = &cmd[ccb->ccb_offset]; 894 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) + 895 ccb->ccb_offset; 896 897 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x " 898 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n", 899 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc, 900 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd, 901 ccb->ccb_cmd_dva); 902 903 mpi_put_ccb(sc, ccb); 904 } 905 906 return (0); 907 908 free_maps: 909 while ((ccb = mpi_get_ccb(sc)) != NULL) 910 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap); 911 912 mpi_dmamem_free(sc, sc->sc_requests); 913 free_ccbs: 914 free(sc->sc_ccbs, M_DEVBUF); 915 916 return (1); 917 } 918 919 struct mpi_ccb * 920 mpi_get_ccb(struct mpi_softc *sc) 921 { 922 struct mpi_ccb *ccb; 923 924 ccb = TAILQ_FIRST(&sc->sc_ccb_free); 925 if (ccb == NULL) { 926 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb == NULL\n", DEVNAME(sc)); 927 return (NULL); 928 } 929 930 TAILQ_REMOVE(&sc->sc_ccb_free, ccb, ccb_link); 931 932 ccb->ccb_state = MPI_CCB_READY; 933 934 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %#x\n", DEVNAME(sc), ccb); 935 936 return (ccb); 937 } 938 939 void 940 mpi_put_ccb(struct mpi_softc *sc, struct mpi_ccb *ccb) 941 { 942 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %#x\n", 
DEVNAME(sc), ccb); 943 944 ccb->ccb_state = MPI_CCB_FREE; 945 ccb->ccb_xs = NULL; 946 ccb->ccb_done = NULL; 947 bzero(ccb->ccb_cmd, MPI_REQUEST_SIZE); 948 TAILQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_link); 949 } 950 951 int 952 mpi_alloc_replies(struct mpi_softc *sc) 953 { 954 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc)); 955 956 sc->sc_replies = mpi_dmamem_alloc(sc, PAGE_SIZE); 957 if (sc->sc_replies == NULL) 958 return (1); 959 960 return (0); 961 } 962 963 void 964 mpi_push_replies(struct mpi_softc *sc) 965 { 966 paddr_t reply; 967 int i; 968 969 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 970 0, PAGE_SIZE, BUS_DMASYNC_PREREAD); 971 972 for (i = 0; i < PAGE_SIZE / MPI_REPLY_SIZE; i++) { 973 reply = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) + 974 MPI_REPLY_SIZE * i; 975 DNPRINTF(MPI_D_MEM, "%s: mpi_push_replies %#x\n", DEVNAME(sc), 976 reply); 977 mpi_push_reply(sc, reply); 978 } 979 } 980 981 void 982 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb) 983 { 984 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc), 985 ccb->ccb_cmd_dva); 986 987 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 988 ccb->ccb_offset, MPI_REQUEST_SIZE, 989 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 990 991 ccb->ccb_state = MPI_CCB_QUEUED; 992 mpi_write(sc, MPI_REQ_QUEUE, ccb->ccb_cmd_dva); 993 } 994 995 int 996 mpi_complete(struct mpi_softc *sc, struct mpi_ccb *nccb, int timeout) 997 { 998 struct mpi_ccb *ccb; 999 struct mpi_msg_reply *reply; 1000 u_int32_t reply_dva; 1001 char *reply_addr; 1002 u_int32_t reg, id = 0xffffffff; 1003 1004 DNPRINTF(MPI_D_INTR, "%s: mpi_complete timeout %d\n", DEVNAME(sc), 1005 timeout); 1006 1007 do { 1008 reg = mpi_pop_reply(sc); 1009 if (reg == 0xffffffff) { 1010 if (timeout-- == 0) 1011 return (1); 1012 1013 delay(1000); 1014 continue; 1015 } 1016 1017 DNPRINTF(MPI_D_INTR, "%s: mpi_complete reply_queue: 0x%08x\n", 1018 DEVNAME(sc), reg); 1019 1020 if (reg & MPI_REPLY_QUEUE_ADDRESS) { 1021 bus_dmamap_sync(sc->sc_dmat, 1022 MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE, 1023 BUS_DMASYNC_POSTREAD); 1024 1025 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1; 1026 1027 reply_addr = MPI_DMA_KVA(sc->sc_replies); 1028 reply_addr += reply_dva - 1029 (u_int32_t)MPI_DMA_DVA(sc->sc_replies); 1030 reply = (struct mpi_msg_reply *)reply_addr; 1031 1032 id = letoh32(reply->msg_context); 1033 1034 bus_dmamap_sync(sc->sc_dmat, 1035 MPI_DMA_MAP(sc->sc_replies), 0, PAGE_SIZE, 1036 BUS_DMASYNC_PREREAD); 1037 } else { 1038 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) { 1039 case MPI_REPLY_QUEUE_TYPE_INIT: 1040 id = reg & MPI_REPLY_QUEUE_CONTEXT; 1041 break; 1042 1043 default: 1044 panic("%s: unsupported context reply\n", 1045 DEVNAME(sc)); 1046 } 1047 1048 reply = NULL; 1049 } 1050 1051 DNPRINTF(MPI_D_INTR, "%s: mpi_complete id: %d\n", 1052 DEVNAME(sc), id); 1053 1054 ccb = &sc->sc_ccbs[id]; 1055 1056 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests), 1057 ccb->ccb_offset, MPI_REQUEST_SIZE, 1058 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1059 ccb->ccb_state = MPI_CCB_READY; 1060 ccb->ccb_reply = reply; 1061 ccb->ccb_reply_dva = reply_dva; 1062 1063 ccb->ccb_done(ccb); 1064 1065 } while (nccb->ccb_id != id); 1066 1067 return (0); 1068 } 1069 1070 int 1071 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout) 1072 { 1073 int error; 1074 int s; 1075 1076 DNPRINTF(MPI_D_CMD, "%s: mpi_poll\n", DEVNAME(sc)); 1077 1078 s = splbio(); 1079 mpi_start(sc, ccb); 1080 error = mpi_complete(sc, ccb, timeout); 1081 splx(s); 1082 1083 return (error); 
}

int
mpi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link		*link = xs->sc_link;
	struct mpi_softc		*sc = link->adapter_softc;
	struct mpi_ccb			*ccb;
	struct mpi_ccb_bundle		*mcb;
	struct mpi_msg_scsi_io		*io;
	int				s;

	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPI_CDB_LEN) {
		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		s = splbio();
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

	s = splbio();
	ccb = mpi_get_ccb(sc);
	splx(s);
	if (ccb == NULL) {
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}
	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_id, xs->flags);

	ccb->ccb_xs = xs;
	ccb->ccb_done = mpi_scsi_cmd_done;

	mcb = ccb->ccb_cmd;
	io = &mcb->mcb_io;

	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
	/*
	 * bus is always 0
	 * io->bus = htole16(sc->sc_bus);
	 */
	io->target_id = link->target;

	io->cdb_length = xs->cmdlen;
	io->sense_buf_len = sizeof(xs->sense);
	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;

	io->msg_context = htole32(ccb->ccb_id);

	io->lun[0] = htobe16(link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPI_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPI_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPI_SCSIIO_DIR_NONE;
		break;
	}

	if (link->quirks & SDEV_NOTAGS)
		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
	else
		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;

	bcopy(xs->cmd, io->cdb, xs->cmdlen);

	io->data_length = htole32(xs->datalen);

	io->sense_buf_low_addr = htole32(ccb->ccb_cmd_dva +
	    ((u_int8_t *)&mcb->mcb_sense - (u_int8_t *)mcb));

	if (mpi_load_xs(ccb) != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		mpi_put_ccb(sc, ccb);
		scsi_done(xs);
		splx(s);
		return (COMPLETE);
	}

	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);

	if (xs->flags & SCSI_POLL) {
		if (mpi_poll(sc, ccb, xs->timeout) != 0)
			xs->error = XS_DRIVER_STUFFUP;
		return (COMPLETE);
	}

	s = splbio();
	mpi_start(sc, ccb);
	splx(s);
	return (SUCCESSFULLY_QUEUED);
}

void
mpi_scsi_cmd_done(struct mpi_ccb *ccb)
{
	struct mpi_softc		*sc = ccb->ccb_sc;
	struct scsi_xfer		*xs = ccb->ccb_xs;
	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
	bus_dmamap_t			dmap = ccb->ccb_dmamap;
	struct mpi_msg_scsi_io_error	*sie = ccb->ccb_reply;

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ?
BUS_DMASYNC_POSTREAD : 1205 BUS_DMASYNC_POSTWRITE); 1206 1207 bus_dmamap_unload(sc->sc_dmat, dmap); 1208 } 1209 1210 /* timeout_del */ 1211 xs->error = XS_NOERROR; 1212 xs->resid = 0; 1213 xs->flags |= ITSDONE; 1214 1215 if (sie == NULL) { 1216 /* no scsi error, we're ok so drop out early */ 1217 xs->status = SCSI_OK; 1218 mpi_put_ccb(sc, ccb); 1219 scsi_done(xs); 1220 return; 1221 } 1222 1223 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d " 1224 "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen, 1225 xs->flags); 1226 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d " 1227 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus, 1228 sie->msg_length, sie->function); 1229 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d " 1230 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length, 1231 sie->sense_buf_len, sie->msg_flags); 1232 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1233 letoh32(sie->msg_context)); 1234 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x " 1235 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status, 1236 sie->scsi_state, letoh16(sie->ioc_status)); 1237 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1238 letoh32(sie->ioc_loginfo)); 1239 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc), 1240 letoh32(sie->transfer_count)); 1241 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc), 1242 letoh32(sie->sense_count)); 1243 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc), 1244 letoh32(sie->response_info)); 1245 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc), 1246 letoh16(sie->tag)); 1247 1248 xs->status = sie->scsi_status; 1249 switch (letoh16(sie->ioc_status)) { 1250 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 1251 xs->resid = xs->datalen - letoh32(sie->transfer_count); 1252 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS) { 1253 xs->error = XS_DRIVER_STUFFUP; 1254 break; 1255 } 1256 /* FALLTHROUGH */ 1257 case MPI_IOCSTATUS_SUCCESS: 1258 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 1259 switch (xs->status) { 1260 case SCSI_OK: 1261 xs->resid = 0; 1262 break; 1263 1264 case SCSI_CHECK: 1265 xs->error = XS_SENSE; 1266 break; 1267 1268 case SCSI_BUSY: 1269 case SCSI_QUEUE_FULL: 1270 xs->error = XS_BUSY; 1271 break; 1272 1273 default: 1274 xs->error = XS_DRIVER_STUFFUP; 1275 break; 1276 } 1277 break; 1278 1279 case MPI_IOCSTATUS_BUSY: 1280 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 1281 xs->error = XS_BUSY; 1282 break; 1283 1284 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 1285 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 1286 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 1287 xs->error = XS_SELTIMEOUT; 1288 break; 1289 1290 default: 1291 xs->error = XS_DRIVER_STUFFUP; 1292 break; 1293 } 1294 1295 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID) 1296 bcopy(&mcb->mcb_sense, &xs->sense, sizeof(xs->sense)); 1297 1298 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc), 1299 xs->error, xs->status); 1300 1301 mpi_push_reply(sc, ccb->ccb_reply_dva); 1302 mpi_put_ccb(sc, ccb); 1303 scsi_done(xs); 1304 } 1305 1306 void 1307 mpi_timeout_xs(void *arg) 1308 { 1309 /* XXX */ 1310 } 1311 1312 int 1313 mpi_load_xs(struct mpi_ccb *ccb) 1314 { 1315 struct mpi_softc *sc = ccb->ccb_sc; 1316 struct scsi_xfer *xs = ccb->ccb_xs; 1317 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd; 1318 struct mpi_msg_scsi_io *io = &mcb->mcb_io; 1319 struct mpi_sge *sge, *nsge = &mcb->mcb_sgl[0]; 1320 struct mpi_sge *ce = NULL, *nce; 1321 u_int64_t ce_dva; 1322 bus_dmamap_t dmap = 
ccb->ccb_dmamap; 1323 u_int32_t addr, flags; 1324 int i, error; 1325 1326 if (xs->datalen == 0) { 1327 nsge->sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 1328 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL); 1329 return (0); 1330 } 1331 1332 error = bus_dmamap_load(sc->sc_dmat, dmap, 1333 xs->data, xs->datalen, NULL, 1334 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 1335 if (error) { 1336 printf("%s: error %d loading dmamap\n", DEVNAME(sc), error); 1337 return (1); 1338 } 1339 1340 flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64; 1341 if (xs->flags & SCSI_DATA_OUT) 1342 flags |= MPI_SGE_FL_DIR_OUT; 1343 1344 if (dmap->dm_nsegs > sc->sc_first_sgl_len) { 1345 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1]; 1346 io->chain_offset = ((u_int8_t *)ce - (u_int8_t *)io) / 4; 1347 } 1348 1349 for (i = 0; i < dmap->dm_nsegs; i++) { 1350 1351 if (nsge == ce) { 1352 nsge++; 1353 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST); 1354 1355 DNPRINTF(MPI_D_DMA, "%s: - 0x%08x 0x%08x 0x%08x\n", 1356 DEVNAME(sc), sge->sg_hdr, 1357 sge->sg_hi_addr, sge->sg_lo_addr); 1358 1359 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) { 1360 nce = &nsge[sc->sc_chain_len - 1]; 1361 addr = ((u_int8_t *)nce - (u_int8_t *)nsge) / 4; 1362 addr = addr << 16 | 1363 sizeof(struct mpi_sge) * sc->sc_chain_len; 1364 } else { 1365 nce = NULL; 1366 addr = sizeof(struct mpi_sge) * 1367 (dmap->dm_nsegs - i); 1368 } 1369 1370 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN | 1371 MPI_SGE_FL_SIZE_64 | addr); 1372 1373 ce_dva = ccb->ccb_cmd_dva + 1374 ((u_int8_t *)nsge - (u_int8_t *)mcb); 1375 1376 addr = (u_int32_t)(ce_dva >> 32); 1377 ce->sg_hi_addr = htole32(addr); 1378 addr = (u_int32_t)ce_dva; 1379 ce->sg_lo_addr = htole32(addr); 1380 1381 DNPRINTF(MPI_D_DMA, "%s: ce: 0x%08x 0x%08x 0x%08x\n", 1382 DEVNAME(sc), ce->sg_hdr, ce->sg_hi_addr, 1383 ce->sg_lo_addr); 1384 1385 ce = nce; 1386 } 1387 1388 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc), 1389 i, dmap->dm_segs[i].ds_len, 1390 (u_int64_t)dmap->dm_segs[i].ds_addr); 1391 1392 sge = nsge; 1393 1394 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len); 1395 addr = (u_int32_t)((u_int64_t)dmap->dm_segs[i].ds_addr >> 32); 1396 sge->sg_hi_addr = htole32(addr); 1397 addr = (u_int32_t)dmap->dm_segs[i].ds_addr; 1398 sge->sg_lo_addr = htole32(addr); 1399 1400 DNPRINTF(MPI_D_DMA, "%s: %d: 0x%08x 0x%08x 0x%08x\n", 1401 DEVNAME(sc), i, sge->sg_hdr, sge->sg_hi_addr, 1402 sge->sg_lo_addr); 1403 1404 nsge = sge + 1; 1405 } 1406 1407 /* terminate list */ 1408 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | 1409 MPI_SGE_FL_EOL); 1410 1411 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize, 1412 (xs->flags & SCSI_DATA_IN) ? 
BUS_DMASYNC_PREREAD : 1413 BUS_DMASYNC_PREWRITE); 1414 1415 return (0); 1416 } 1417 1418 void 1419 mpi_minphys(struct buf *bp) 1420 { 1421 /* XXX */ 1422 if (bp->b_bcount > MAXPHYS) 1423 bp->b_bcount = MAXPHYS; 1424 minphys(bp); 1425 } 1426 1427 int 1428 mpi_scsi_ioctl(struct scsi_link *a, u_long b, caddr_t c, int d, struct proc *e) 1429 { 1430 return (0); 1431 } 1432 1433 u_int32_t 1434 mpi_read(struct mpi_softc *sc, bus_size_t r) 1435 { 1436 u_int32_t rv; 1437 1438 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1439 BUS_SPACE_BARRIER_READ); 1440 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r); 1441 1442 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv); 1443 1444 return (rv); 1445 } 1446 1447 void 1448 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v) 1449 { 1450 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v); 1451 1452 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 1453 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 1454 BUS_SPACE_BARRIER_WRITE); 1455 } 1456 1457 int 1458 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1459 u_int32_t target) 1460 { 1461 int i; 1462 1463 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r, 1464 mask, target); 1465 1466 for (i = 0; i < 10000; i++) { 1467 if ((mpi_read(sc, r) & mask) == target) 1468 return (0); 1469 delay(1000); 1470 } 1471 1472 return (1); 1473 } 1474 1475 int 1476 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask, 1477 u_int32_t target) 1478 { 1479 int i; 1480 1481 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r, 1482 mask, target); 1483 1484 for (i = 0; i < 10000; i++) { 1485 if ((mpi_read(sc, r) & mask) != target) 1486 return (0); 1487 delay(1000); 1488 } 1489 1490 return (1); 1491 } 1492 1493 int 1494 mpi_init(struct mpi_softc *sc) 1495 { 1496 u_int32_t db; 1497 int i; 1498 1499 /* spin until the IOC leaves the RESET state */ 1500 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1501 MPI_DOORBELL_STATE_RESET) != 0) { 1502 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave " 1503 "reset state\n", DEVNAME(sc)); 1504 return (1); 1505 } 1506 1507 /* check current ownership */ 1508 db = mpi_read_db(sc); 1509 if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) { 1510 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n", 1511 DEVNAME(sc)); 1512 return (0); 1513 } 1514 1515 for (i = 0; i < 5; i++) { 1516 switch (db & MPI_DOORBELL_STATE) { 1517 case MPI_DOORBELL_STATE_READY: 1518 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n", 1519 DEVNAME(sc)); 1520 return (0); 1521 1522 case MPI_DOORBELL_STATE_OPER: 1523 case MPI_DOORBELL_STATE_FAULT: 1524 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being " 1525 "reset\n" , DEVNAME(sc)); 1526 if (mpi_reset_soft(sc) != 0) 1527 mpi_reset_hard(sc); 1528 break; 1529 1530 case MPI_DOORBELL_STATE_RESET: 1531 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come " 1532 "out of reset\n", DEVNAME(sc)); 1533 if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1534 MPI_DOORBELL_STATE_RESET) != 0) 1535 return (1); 1536 break; 1537 } 1538 db = mpi_read_db(sc); 1539 } 1540 1541 return (1); 1542 } 1543 1544 int 1545 mpi_reset_soft(struct mpi_softc *sc) 1546 { 1547 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc)); 1548 1549 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1550 return (1); 1551 1552 mpi_write_db(sc, 1553 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET)); 1554 if (mpi_wait_eq(sc, MPI_INTR_STATUS, 1555 MPI_INTR_STATUS_IOCDOORBELL, 0) != 0) 1556 return 
(1); 1557 1558 if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE, 1559 MPI_DOORBELL_STATE_READY) != 0) 1560 return (1); 1561 1562 return (0); 1563 } 1564 1565 int 1566 mpi_reset_hard(struct mpi_softc *sc) 1567 { 1568 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc)); 1569 1570 /* enable diagnostic register */ 1571 mpi_write(sc, MPI_WRITESEQ, 0xff); 1572 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1); 1573 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2); 1574 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3); 1575 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4); 1576 mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5); 1577 1578 /* reset ioc */ 1579 mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER); 1580 1581 delay(10000); 1582 1583 /* disable diagnostic register */ 1584 mpi_write(sc, MPI_WRITESEQ, 0xff); 1585 1586 /* restore pci bits? */ 1587 1588 /* firmware bits? */ 1589 return (0); 1590 } 1591 1592 int 1593 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords) 1594 { 1595 u_int32_t *query = buf; 1596 int i; 1597 1598 /* make sure the doorbell is not in use. */ 1599 if (mpi_read_db(sc) & MPI_DOORBELL_INUSE) 1600 return (1); 1601 1602 /* clear pending doorbell interrupts */ 1603 if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL) 1604 mpi_write_intr(sc, 0); 1605 1606 /* 1607 * first write the doorbell with the handshake function and the 1608 * dword count. 1609 */ 1610 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) | 1611 MPI_DOORBELL_DWORDS(dwords)); 1612 1613 /* 1614 * the doorbell used bit will be set because a doorbell function has 1615 * started. Wait for the interrupt and then ack it. 1616 */ 1617 if (mpi_wait_db_int(sc) != 0) 1618 return (1); 1619 mpi_write_intr(sc, 0); 1620 1621 /* poll for the acknowledgement. */ 1622 if (mpi_wait_db_ack(sc) != 0) 1623 return (1); 1624 1625 /* write the query through the doorbell. */ 1626 for (i = 0; i < dwords; i++) { 1627 mpi_write_db(sc, htole32(query[i])); 1628 if (mpi_wait_db_ack(sc) != 0) 1629 return (1); 1630 } 1631 1632 return (0); 1633 } 1634 1635 int 1636 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword) 1637 { 1638 u_int16_t *words = (u_int16_t *)dword; 1639 int i; 1640 1641 for (i = 0; i < 2; i++) { 1642 if (mpi_wait_db_int(sc) != 0) 1643 return (1); 1644 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK); 1645 mpi_write_intr(sc, 0); 1646 } 1647 1648 return (0); 1649 } 1650 1651 int 1652 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords) 1653 { 1654 struct mpi_msg_reply *reply = buf; 1655 u_int32_t *dbuf = buf, dummy; 1656 int i; 1657 1658 /* get the first dword so we can read the length out of the header. */ 1659 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0) 1660 return (1); 1661 1662 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n", 1663 DEVNAME(sc), dwords, reply->msg_length); 1664 1665 /* 1666 * the total length, in dwords, is in the message length field of the 1667 * reply header. 
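	 * only as many dwords as fit in the caller's buffer are stored;
	 * any extra dwords the ioc still wants to hand back are drained
	 * and discarded by the loop further down so the handshake can
	 * run to completion.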
1668 */ 1669 for (i = 1; i < MIN(dwords, reply->msg_length); i++) { 1670 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0) 1671 return (1); 1672 } 1673 1674 /* if there's extra stuff to come off the ioc, discard it */ 1675 while (i++ < reply->msg_length) { 1676 if (mpi_handshake_recv_dword(sc, &dummy) != 0) 1677 return (1); 1678 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: " 1679 "0x%08x\n", DEVNAME(sc), dummy); 1680 } 1681 1682 /* wait for the doorbell used bit to be reset and clear the intr */ 1683 if (mpi_wait_db_int(sc) != 0) 1684 return (1); 1685 mpi_write_intr(sc, 0); 1686 1687 return (0); 1688 } 1689 1690 void 1691 mpi_empty_done(struct mpi_ccb *ccb) 1692 { 1693 /* nothing to do */ 1694 } 1695 1696 int 1697 mpi_iocfacts(struct mpi_softc *sc) 1698 { 1699 struct mpi_msg_iocfacts_request ifq; 1700 struct mpi_msg_iocfacts_reply ifp; 1701 1702 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc)); 1703 1704 bzero(&ifq, sizeof(ifq)); 1705 bzero(&ifp, sizeof(ifp)); 1706 1707 ifq.function = MPI_FUNCTION_IOC_FACTS; 1708 ifq.chain_offset = 0; 1709 ifq.msg_flags = 0; 1710 ifq.msg_context = htole32(0xdeadbeef); 1711 1712 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) { 1713 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n", 1714 DEVNAME(sc)); 1715 return (1); 1716 } 1717 1718 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) { 1719 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n", 1720 DEVNAME(sc)); 1721 return (1); 1722 } 1723 1724 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n", 1725 DEVNAME(sc), ifp.function, ifp.msg_length, 1726 ifp.msg_version_maj, ifp.msg_version_min); 1727 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x " 1728 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags, 1729 ifp.ioc_number, ifp.header_version_maj, 1730 ifp.header_version_min); 1731 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc), 1732 letoh32(ifp.msg_context)); 1733 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n", 1734 DEVNAME(sc), letoh16(ifp.ioc_status), 1735 letoh16(ifp.ioc_exceptions)); 1736 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc), 1737 letoh32(ifp.ioc_loginfo)); 1738 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x " 1739 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags, 1740 ifp.block_size, ifp.whoinit, ifp.max_chain_depth); 1741 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n", 1742 DEVNAME(sc), letoh16(ifp.request_frame_size), 1743 letoh16(ifp.reply_queue_depth)); 1744 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc), 1745 letoh16(ifp.product_id)); 1746 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc), 1747 letoh32(ifp.current_host_mfa_hi_addr)); 1748 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d " 1749 "global_credits: %d\n", 1750 DEVNAME(sc), ifp.event_state, ifp.number_of_ports, 1751 letoh16(ifp.global_credits)); 1752 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc), 1753 letoh32(ifp.current_sense_buffer_hi_addr)); 1754 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n", 1755 DEVNAME(sc), ifp.max_buses, ifp.max_devices, 1756 letoh16(ifp.current_reply_frame_size)); 1757 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc), 1758 letoh32(ifp.fw_image_size)); 1759 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc), 1760 letoh32(ifp.ioc_capabilities)); 1761 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x " 1762 "fw_version_dev: 0x%02x\n", DEVNAME(sc), 
1763 ifp.fw_version_maj, ifp.fw_version_min, 1764 ifp.fw_version_unit, ifp.fw_version_dev); 1765 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n", 1766 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth)); 1767 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x " 1768 "addr 0x%08x %08x\n", DEVNAME(sc), 1769 letoh32(ifp.host_page_buffer_sge.sg_hdr), 1770 letoh32(ifp.host_page_buffer_sge.sg_hi_addr), 1771 letoh32(ifp.host_page_buffer_sge.sg_lo_addr)); 1772 1773 sc->sc_maxcmds = letoh16(ifp.global_credits); 1774 sc->sc_maxchdepth = ifp.max_chain_depth; 1775 sc->sc_ioc_number = ifp.ioc_number; 1776 if (sc->sc_flags & MPI_F_VMWARE) 1777 sc->sc_buswidth = 16; 1778 else 1779 sc->sc_buswidth = 1780 (ifp.max_devices == 0) ? 256 : ifp.max_devices; 1781 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) 1782 sc->sc_fw_len = letoh32(ifp.fw_image_size); 1783 1784 /* 1785 * you can fit sg elements on the end of the io cmd if they fit in the 1786 * request frame size. 1787 */ 1788 sc->sc_first_sgl_len = ((letoh16(ifp.request_frame_size) * 4) - 1789 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge); 1790 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc), 1791 sc->sc_first_sgl_len); 1792 1793 sc->sc_chain_len = (letoh16(ifp.request_frame_size) * 4) / 1794 sizeof(struct mpi_sge); 1795 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc), 1796 sc->sc_chain_len); 1797 1798 /* the sgl tailing the io cmd loses an entry to the chain element. */ 1799 sc->sc_max_sgl_len = MPI_MAX_SGL - 1; 1800 /* the sgl chains lose an entry for each chain element */ 1801 sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) / 1802 sc->sc_chain_len; 1803 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc), 1804 sc->sc_max_sgl_len); 1805 1806 /* XXX we're ignoring the max chain depth */ 1807 1808 return (0); 1809 } 1810 1811 int 1812 mpi_iocinit(struct mpi_softc *sc) 1813 { 1814 struct mpi_msg_iocinit_request iiq; 1815 struct mpi_msg_iocinit_reply iip; 1816 u_int32_t hi_addr; 1817 1818 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc)); 1819 1820 bzero(&iiq, sizeof(iiq)); 1821 bzero(&iip, sizeof(iip)); 1822 1823 iiq.function = MPI_FUNCTION_IOC_INIT; 1824 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER; 1825 1826 iiq.max_devices = (sc->sc_buswidth == 256) ? 
0 : sc->sc_buswidth; 1827 iiq.max_buses = 1; 1828 1829 iiq.msg_context = htole32(0xd00fd00f); 1830 1831 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE); 1832 1833 hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_requests) >> 32); 1834 iiq.host_mfa_hi_addr = htole32(hi_addr); 1835 iiq.sense_buffer_hi_addr = htole32(hi_addr); 1836 1837 hi_addr = (u_int32_t)((u_int64_t)MPI_DMA_DVA(sc->sc_replies) >> 32); 1838 iiq.reply_fifo_host_signalling_addr = htole32(hi_addr); 1839 1840 iiq.msg_version_maj = 0x01; 1841 iiq.msg_version_min = 0x02; 1842 1843 iiq.hdr_version_unit = 0x0d; 1844 iiq.hdr_version_dev = 0x00; 1845 1846 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) { 1847 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n", 1848 DEVNAME(sc)); 1849 return (1); 1850 } 1851 1852 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) { 1853 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n", 1854 DEVNAME(sc)); 1855 return (1); 1856 } 1857 1858 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d " 1859 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function, 1860 iip.msg_length, iip.whoinit); 1861 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d " 1862 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags, 1863 iip.max_buses, iip.max_devices, iip.flags); 1864 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1865 letoh32(iip.msg_context)); 1866 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 1867 letoh16(iip.ioc_status)); 1868 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1869 letoh32(iip.ioc_loginfo)); 1870 1871 return (0); 1872 } 1873 1874 int 1875 mpi_portfacts(struct mpi_softc *sc) 1876 { 1877 struct mpi_ccb *ccb; 1878 struct mpi_msg_portfacts_request *pfq; 1879 volatile struct mpi_msg_portfacts_reply *pfp; 1880 int s, rv = 1; 1881 1882 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc)); 1883 1884 s = splbio(); 1885 ccb = mpi_get_ccb(sc); 1886 splx(s); 1887 if (ccb == NULL) { 1888 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n", 1889 DEVNAME(sc)); 1890 return (rv); 1891 } 1892 1893 ccb->ccb_done = mpi_empty_done; 1894 pfq = ccb->ccb_cmd; 1895 1896 pfq->function = MPI_FUNCTION_PORT_FACTS; 1897 pfq->chain_offset = 0; 1898 pfq->msg_flags = 0; 1899 pfq->port_number = 0; 1900 pfq->msg_context = htole32(ccb->ccb_id); 1901 1902 if (mpi_poll(sc, ccb, 50000) != 0) { 1903 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc)); 1904 goto err; 1905 } 1906 1907 pfp = ccb->ccb_reply; 1908 if (pfp == NULL) { 1909 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n", 1910 DEVNAME(sc)); 1911 goto err; 1912 } 1913 1914 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n", 1915 DEVNAME(sc), pfp->function, pfp->msg_length); 1916 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n", 1917 DEVNAME(sc), pfp->msg_flags, pfp->port_number); 1918 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 1919 letoh32(pfp->msg_context)); 1920 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 1921 letoh16(pfp->ioc_status)); 1922 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1923 letoh32(pfp->ioc_loginfo)); 1924 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n", 1925 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type); 1926 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n", 1927 DEVNAME(sc), letoh16(pfp->protocol_flags), 1928 letoh16(pfp->port_scsi_id)); 1929 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d " 1930 "max_posted_cmd_buffers: %d\n", DEVNAME(sc), 
1931 letoh16(pfp->max_persistent_ids), 1932 letoh16(pfp->max_posted_cmd_buffers)); 1933 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc), 1934 letoh16(pfp->max_lan_buckets)); 1935 1936 sc->sc_porttype = pfp->port_type; 1937 sc->sc_target = letoh16(pfp->port_scsi_id); 1938 1939 mpi_push_reply(sc, ccb->ccb_reply_dva); 1940 rv = 0; 1941 err: 1942 mpi_put_ccb(sc, ccb); 1943 1944 return (rv); 1945 } 1946 1947 int 1948 mpi_eventnotify(struct mpi_softc *sc) 1949 { 1950 struct mpi_ccb *ccb; 1951 struct mpi_msg_event_request *enq; 1952 int s; 1953 1954 s = splbio(); 1955 ccb = mpi_get_ccb(sc); 1956 splx(s); 1957 if (ccb == NULL) { 1958 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n", 1959 DEVNAME(sc)); 1960 return (1); 1961 } 1962 1963 ccb->ccb_done = mpi_eventnotify_done; 1964 enq = ccb->ccb_cmd; 1965 1966 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION; 1967 enq->chain_offset = 0; 1968 enq->ev_switch = 1; 1969 enq->msg_context = htole32(ccb->ccb_id); 1970 1971 mpi_start(sc, ccb); 1972 return (0); 1973 } 1974 1975 void 1976 mpi_eventnotify_done(struct mpi_ccb *ccb) 1977 { 1978 struct mpi_softc *sc = ccb->ccb_sc; 1979 struct mpi_msg_event_reply *enp = ccb->ccb_reply; 1980 u_int32_t *data; 1981 int i; 1982 1983 printf("%s: %s\n", DEVNAME(sc), __func__); 1984 1985 printf("%s: function: 0x%02x msg_length: %d data_length: %d\n", 1986 DEVNAME(sc), enp->function, enp->msg_length, 1987 letoh16(enp->data_length)); 1988 1989 printf("%s: ack_required: %d msg_flags 0x%02x\n", DEVNAME(sc), 1990 enp->msg_flags, enp->msg_flags); 1991 1992 printf("%s: msg_context: 0x%08x\n", DEVNAME(sc), 1993 letoh32(enp->msg_context)); 1994 1995 printf("%s: ioc_status: 0x%04x\n", DEVNAME(sc), 1996 letoh16(enp->ioc_status)); 1997 1998 printf("%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 1999 letoh32(enp->ioc_loginfo)); 2000 2001 data = ccb->ccb_reply; 2002 data += dwordsof(struct mpi_msg_event_reply); 2003 for (i = 0; i < letoh16(enp->data_length); i++) { 2004 printf("%s: data[%d]: 0x%08x\n", DEVNAME(sc), i, data[i]); 2005 } 2006 } 2007 2008 int 2009 mpi_portenable(struct mpi_softc *sc) 2010 { 2011 struct mpi_ccb *ccb; 2012 struct mpi_msg_portenable_request *peq; 2013 struct mpi_msg_portenable_repy *pep; 2014 int s; 2015 2016 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc)); 2017 2018 s = splbio(); 2019 ccb = mpi_get_ccb(sc); 2020 splx(s); 2021 if (ccb == NULL) { 2022 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n", 2023 DEVNAME(sc)); 2024 return (1); 2025 } 2026 2027 ccb->ccb_done = mpi_empty_done; 2028 peq = ccb->ccb_cmd; 2029 2030 peq->function = MPI_FUNCTION_PORT_ENABLE; 2031 peq->port_number = 0; 2032 peq->msg_context = htole32(ccb->ccb_id); 2033 2034 if (mpi_poll(sc, ccb, 50000) != 0) { 2035 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc)); 2036 return (1); 2037 } 2038 2039 pep = ccb->ccb_reply; 2040 if (pep == NULL) { 2041 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n", 2042 DEVNAME(sc)); 2043 return (1); 2044 } 2045 2046 mpi_push_reply(sc, ccb->ccb_reply_dva); 2047 mpi_put_ccb(sc, ccb); 2048 2049 return (0); 2050 } 2051 2052 int 2053 mpi_fwupload(struct mpi_softc *sc) 2054 { 2055 struct mpi_ccb *ccb; 2056 struct { 2057 struct mpi_msg_fwupload_request req; 2058 struct mpi_sge sge; 2059 } __packed *bundle; 2060 struct mpi_msg_fwupload_reply *upp; 2061 u_int64_t addr; 2062 int s; 2063 int rv = 0; 2064 2065 if (sc->sc_fw_len == 0) 2066 return (0); 2067 2068 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc)); 2069 2070 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len); 2071 if 

int
mpi_fwupload(struct mpi_softc *sc)
{
	struct mpi_ccb				*ccb;
	struct {
		struct mpi_msg_fwupload_request		req;
		struct mpi_sge				sge;
	} __packed				*bundle;
	struct mpi_msg_fwupload_reply		*upp;
	u_int64_t				addr;
	int					s;
	int					rv = 0;

	if (sc->sc_fw_len == 0)
		return (0);

	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));

	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
	if (sc->sc_fw == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
		    DEVNAME(sc), sc->sc_fw_len);
		return (1);
	}

	s = splbio();
	ccb = mpi_get_ccb(sc);
	splx(s);
	if (ccb == NULL) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
		    DEVNAME(sc));
		goto err;
	}

	ccb->ccb_done = mpi_empty_done;
	bundle = ccb->ccb_cmd;

	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
	bundle->req.msg_context = htole32(ccb->ccb_id);

	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;

	bundle->req.tce.details_length = 12;
	bundle->req.tce.image_size = htole32(sc->sc_fw_len);

	bundle->sge.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE |
	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
	addr = MPI_DMA_DVA(sc->sc_fw);
	bundle->sge.sg_hi_addr = htole32((u_int32_t)(addr >> 32));
	bundle->sge.sg_lo_addr = htole32((u_int32_t)addr);

	if (mpi_poll(sc, ccb, 50000) != 0) {
		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload poll\n", DEVNAME(sc));
		goto err;
	}

	upp = ccb->ccb_reply;
	if (upp == NULL)
		panic("%s: unable to do fw upload\n", DEVNAME(sc));

	if (letoh16(upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
		rv = 1;

	mpi_push_reply(sc, ccb->ccb_reply_dva);
	mpi_put_ccb(sc, ccb);

	return (rv);

err:
	mpi_dmamem_free(sc, sc->sc_fw);
	return (1);
}
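
/*
 * Read IOC configuration page 2 to discover any configured RAID volumes.
 * If the IOC reports the RAID capability and has active volumes, the
 * softc is marked MPI_F_RAID and every volume that belongs to this IOC on
 * bus 0 has its scsi_link flagged SDEV_VIRTUAL to mark it as a logical
 * volume rather than a plain disk.
 */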

void
mpi_get_raid(struct mpi_softc *sc)
{
	struct mpi_cfg_hdr		hdr;
	struct mpi_cfg_ioc_pg2		*vol_page;
	struct mpi_cfg_raid_vol		*vol_list, *vol;
	size_t				pagelen;
	u_int32_t			capabilities;
	struct scsi_link		*link;
	int				i;

	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));

	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
		    "for IOC page 2\n", DEVNAME(sc));
		return;
	}

	pagelen = hdr.page_length * 4; /* dwords to bytes */
	vol_page = malloc(pagelen, M_TEMP, M_WAITOK);
	if (vol_page == NULL) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
		    "space for ioc config page 2\n", DEVNAME(sc));
		return;
	}
	vol_list = (struct mpi_cfg_raid_vol *)(vol_page + 1);

	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
		    "page 2\n", DEVNAME(sc));
		goto out;
	}

	capabilities = letoh32(vol_page->capabilities);

	DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x%08x\n", DEVNAME(sc),
	    letoh32(vol_page->capabilities));
	DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d "
	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
	    vol_page->active_vols, vol_page->max_vols,
	    vol_page->active_physdisks, vol_page->max_physdisks);

	/* don't walk the list if there is no RAID capability */
	if (capabilities == 0xdeadbeef) {
		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
		goto out;
	}

	if ((capabilities & MPI_CFG_IOC_2_CAPABILITIES_RAID) == 0 ||
	    (vol_page->active_vols == 0))
		goto out;

	sc->sc_flags |= MPI_F_RAID;

	for (i = 0; i < vol_page->active_vols; i++) {
		vol = &vol_list[i];

		DNPRINTF(MPI_D_RAID, "%s: id: %d bus: %d ioc: %d pg: %d\n",
		    DEVNAME(sc), vol->vol_id, vol->vol_bus, vol->vol_ioc,
		    vol->vol_page);
		DNPRINTF(MPI_D_RAID, "%s: type: 0x%02x flags: 0x%02x\n",
		    DEVNAME(sc), vol->vol_type, vol->flags);

		if (vol->vol_ioc != sc->sc_ioc_number || vol->vol_bus != 0)
			continue;

		link = sc->sc_scsibus->sc_link[vol->vol_id][0];
		if (link == NULL)
			continue;

		link->flags |= SDEV_VIRTUAL;
	}

out:
	free(vol_page, M_TEMP);
}
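
/*
 * Configuration pages are accessed in two steps.  mpi_cfg_header() issues
 * a CONFIG request with the PAGE_HEADER action to discover the version,
 * length, number and type of a page; mpi_cfg_page() then uses that header
 * to read or write the current page.  Page data is bounced through the
 * spare space at the end of the request frame, so a page may be at most
 * MPI_REQUEST_SIZE minus the size of the config request.  mpi_get_raid()
 * above shows the usual sequence: fetch the header, size a buffer from
 * hdr.page_length (counted in dwords), then read the page into it.
 */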
DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc)); 2303 return (1); 2304 } 2305 2306 ccb->ccb_done = mpi_empty_done; 2307 cq = ccb->ccb_cmd; 2308 2309 cq->function = MPI_FUNCTION_CONFIG; 2310 cq->msg_context = htole32(ccb->ccb_id); 2311 2312 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT : 2313 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT); 2314 2315 cq->config_header = *hdr; 2316 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK; 2317 cq->page_address = htole32(address); 2318 cq->page_buffer.sg_hdr = htole32(MPI_SGE_FL_TYPE_SIMPLE | 2319 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL | 2320 (hdr->page_length * 4) | 2321 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT)); 2322 2323 /* bounce the page via the request space to avoid more bus_dma games */ 2324 dva = ccb->ccb_cmd_dva + sizeof(struct mpi_msg_config_request); 2325 2326 cq->page_buffer.sg_hi_addr = htole32((u_int32_t)(dva >> 32)); 2327 cq->page_buffer.sg_lo_addr = htole32((u_int32_t)dva); 2328 2329 kva = ccb->ccb_cmd; 2330 kva += sizeof(struct mpi_msg_config_request); 2331 if (!read) 2332 bcopy(page, kva, len); 2333 2334 if (mpi_poll(sc, ccb, 50000) != 0) { 2335 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page poll\n", DEVNAME(sc)); 2336 return (1); 2337 } 2338 2339 cp = ccb->ccb_reply; 2340 if (cp == NULL) { 2341 mpi_put_ccb(sc, ccb); 2342 return (1); 2343 } 2344 2345 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: " 2346 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function); 2347 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x " 2348 "msg_flags: 0x%02x\n", DEVNAME(sc), 2349 letoh16(cp->ext_page_length), cp->ext_page_type, 2350 cp->msg_flags); 2351 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc), 2352 letoh32(cp->msg_context)); 2353 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc), 2354 letoh16(cp->ioc_status)); 2355 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc), 2356 letoh32(cp->ioc_loginfo)); 2357 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d " 2358 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc), 2359 cp->config_header.page_version, 2360 cp->config_header.page_length, 2361 cp->config_header.page_number, 2362 cp->config_header.page_type); 2363 2364 if (letoh16(cp->ioc_status) != MPI_IOCSTATUS_SUCCESS) 2365 rv = 1; 2366 else if (read) 2367 bcopy(kva, page, len); 2368 2369 mpi_push_reply(sc, ccb->ccb_reply_dva); 2370 mpi_put_ccb(sc, ccb); 2371 2372 return (rv); 2373 } 2374