/*	$NetBSD: mpt_netbsd.c,v 1.8 2004/04/10 01:59:19 briggs Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.8 2004/04/10 01:59:19 briggs Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	if (mpt->is_fc) {
		chan->chan_ntargets = 256;
		chan->chan_id = 256;
	} else {
		chan->chan_ntargets = 16;
		chan->chan_id = mpt->mpt_ini_id;
	}

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}
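
/*
 * Commentary on the DMA memory layout set up below: the request pool
 * itself is plain malloc'd kernel memory; only the message frames are
 * DMA-able.  Reply frames share a single page mapped BUS_DMA_COHERENT.
 * Request frames live in one contiguous MPT_REQ_MEM_SIZE(mpt) region,
 * carved into MPT_REQUEST_AREA-sized chunks, and the last
 * MPT_SENSE_SIZE bytes of each chunk double as that request's
 * autosense buffer.
 */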

int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	caddr_t vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (caddr_t *) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (caddr_t *) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (caddr_t) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	/* Unmap the full request area, not just one page. */
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
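
/*
 * Interrupt commentary (see also mpt_done() below): each word popped
 * from the reply queue is either a "context reply", whose low bits
 * are simply the completed request's pool index (the fast, successful
 * path), or, when MPT_CONTEXT_REPLY is set, an "address reply" that
 * locates a reply frame the IOC filled in with error details.
 */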
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (caddr_t)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000 usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}
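
/*
 * Timeout commentary: before declaring a command dead, mpt_timeout()
 * runs mpt_intr() by hand in case the completion interrupt was merely
 * lost or delayed.  If the request's sequence number changed under
 * us, the command actually completed and the timeout is abandoned.
 */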

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}
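
/*
 * Completion commentary: per the MPT reply-FIFO convention, an
 * address reply word is the reply frame's physical address shifted
 * right one bit, with the high bit set.  MPT_REPLY_PTOV() converts it
 * back to a kernel pointer, and the frame is returned to the IOC with
 * the inverse shift (reply << 1) via mpt_free_reply().
 */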

static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
						      : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}
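
/*
 * Request construction commentary: the request's pool index is placed
 * in the message's MsgContext field, so a bare context reply is
 * enough to find the request_t again in mpt_done() above.  Control
 * operations set the high bit of the context to keep the two
 * namespaces apart.
 */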

static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true(mpt->is_fc ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_fc)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_fc == 0 &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
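
	/*
	 * Scatter/gather commentary: the request frame has room for
	 * MPT_NSGL_FIRST(mpt) simple SG elements inline.  If the DMA
	 * map needs more, the last inline slot becomes a chain element
	 * pointing at further SG lists laid out in the remainder of
	 * this request's buffer, each of which may itself end in
	 * another chain element; see the construction below.
	 */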

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
						       : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
						       : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32);
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = ntodo
					    * sizeof(SGE_SIMPLE32);
				}
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}
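
/*
 * Transfer-mode commentary: SPI negotiation is driven through the
 * per-target SCSI Device Page 1 configuration page.  We write the
 * parameters we are willing to use, then later read Device Page 0
 * (in mpt_get_xfer_mode() below) to learn what was actually
 * negotiated with the device.
 */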

static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (mpt->is_fc) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}
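
/*
 * Decode commentary: in both the port capabilities word and the
 * negotiated parameters word, the synchronous period factor occupies
 * bits 8-15 and the synchronous offset bits 16-23; an offset of zero
 * means the target is running asynchronously.
 */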

static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}
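
/*
 * Control-operation commentary: replies routed here carry a
 * MsgContext with the high bit set (see mpt_done() above); masking
 * that bit off recovers the originating request's pool index.  Note
 * that CONFIG replies are deliberately not freed here -- the reply
 * word is parked in req->sequence, presumably so that the waiting
 * config-page code can consume the frame and free it itself.
 */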

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}
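
/*
 * Event commentary: asynchronous events arrive as EVENT_NOTIFICATION
 * replies.  Most are simply logged; when the IOC sets AckRequired, an
 * EVENT_ACK message is sent back (at the bottom of the function
 * below) so the IOC knows the event has been consumed.
 */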

static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}