/*	$NetBSD: mpt_netbsd.c,v 1.7 2003/07/14 15:47:11 lukem Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.7 2003/07/14 15:47:11 lukem Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	if (mpt->is_fc) {
		chan->chan_ntargets = 256;
		chan->chan_id = 256;
	} else {
		chan->chan_ntargets = 16;
		chan->chan_id = mpt->mpt_ini_id;
	}

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

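/*
 * mpt_dma_mem_alloc:
 *
 *	Allocate the DMA resources the adapter needs: the request_t
 *	bookkeeping pool (ordinary kernel memory), one page of reply
 *	frames, the request frame area (MPT_REQ_MEM_SIZE(mpt) bytes,
 *	carved into per-request slots below), and one data DMA map
 *	per request.  Returns 0 on success; on failure, returns an
 *	errno and releases everything allocated so far.
 */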
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	caddr_t vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

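	/*
	 * Carve the request area into per-request slots.  Each slot
	 * is MPT_REQUEST_AREA bytes, and the last MPT_SENSE_SIZE bytes
	 * of a slot double as that request's sense buffer:
	 *
	 *	req_pbuf ->	+------------------------------+
	 *			| request frame + SGL          |
	 *	sense_pbuf ->	+------------------------------+
	 *			| sense data (MPT_SENSE_SIZE)  |
	 *			+------------------------------+
	 *			  (MPT_REQUEST_AREA bytes total)
	 */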
	pptr = mpt->request_phys;
	vptr = (caddr_t) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

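/*
 * A note on the reply-queue entries popped by mpt_intr() and decoded
 * in mpt_done() below: if the MPT_CONTEXT_REPLY bit (bit 31) is clear,
 * the entry is a context reply and the low bits echo the MsgContext we
 * assigned (a request_pool index) for a successfully completed command.
 * If bit 31 is set, the entry is an address reply whose remaining bits
 * hold the reply frame's physical address shifted right by one, which
 * is why the frame is returned to the IOC as (reply << 1) via
 * mpt_free_reply().
 */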
void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

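/*
 * mpt_done:
 *
 *	Process a single reply-queue entry: decode it as a context or
 *	an address reply, recover the request_t from the MsgContext
 *	index, and complete the associated scsipi_xfer, mapping the
 *	IOC and SCSI status onto a scsipi error code.  Control replies
 *	(MsgContext high bit set) are handed to mpt_ctlop() instead.
 */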
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

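/*
 * mpt_run_xfer:
 *
 *	Issue one scsipi_xfer to the IOC: grab a free request_t, build
 *	the SCSI I/O request frame (CDB, LUN, tag type, data direction),
 *	load the data buffer and describe it with an SGL, and hand the
 *	frame to the IOC.  For XS_CTL_POLL transfers we spin in
 *	mpt_poll() instead of waiting for the completion interrupt.
 */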
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true(mpt->is_fc ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_fc)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_fc == 0 &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

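		/*
		 * Build the SGL.  The request frame has room for
		 * MPT_NSGL_FIRST(mpt) simple 32-bit SG elements; if
		 * the transfer fits, the whole list goes inline (the
		 * "else" branch below).  Otherwise we place
		 * MPT_NSGL_FIRST - 1 simple elements inline, followed
		 * by a chain element pointing at the next run of up to
		 * MPT_NSGL(mpt) elements further into this request
		 * slot; each run may itself end in another chain
		 * element.  The final simple element carries the
		 * END_OF_LIST/END_OF_BUFFER flags.
		 */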
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
				}
				ce->Length = ntodo * sizeof(SGE_SIMPLE32);
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
						BUS_DMASYNC_PREREAD
					      : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
						BUS_DMASYNC_PREREAD
					      : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

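	/*
	 * For interrupt-driven transfers, arm the per-xfer callout
	 * before handing the request to the IOC: mpt_done() stops it
	 * on normal completion, and mpt_timeout() fires if the IOC
	 * never answers.  Polled (XS_CTL_POLL) transfers skip the
	 * callout and spin in mpt_poll() below instead.
	 */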
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

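/*
 * A note on the sync "factor" tested above and reported below: it is
 * the standard SPI transfer period factor, where smaller means faster.
 * 0x08 corresponds to Ultra320 (6.25ns), 0x09 to Ultra160 (12.5ns),
 * and 0x0a to Ultra2 (25ns); hence factors below 0x9 enable QAS and
 * IU, and factors below 0xa enable DT.
 */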
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

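/*
 * mpt_event_notify_reply:
 *
 *	Decode an asynchronous event notification from the IOC and
 *	report it on the console.  If the IOC flags the event as
 *	requiring acknowledgement, send an EVENT_ACK message back.
 */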
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

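	/*
	 * The EVENT_ACK below carries the high bit in its MsgContext,
	 * so mpt_done() treats its completion as a control reply
	 * rather than as an index into the request pool; address
	 * replies for it land in mpt_ctlop()'s MPI_FUNCTION_EVENT_ACK
	 * case.
	 */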
	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}