/*	$NetBSD: mpt_netbsd.c,v 1.13 2007/08/04 22:01:06 tron Exp $	*/

/*
 * Copyright (c) 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2000, 2001 by Greg Ansley
 * Partially derived from Matt Jacob's ISP driver.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */

/*
 * mpt_netbsd.c:
 *
 * NetBSD-specific routines for LSI Fusion adapters.  Includes some
 * bus_dma glue, and SCSIPI glue.
 *
 * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
 * Wasabi Systems, Inc.
 *
 * Additional contributions by Garrett D'Amore on behalf of TELES AG.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.13 2007/08/04 22:01:06 tron Exp $");

#include <dev/ic/mpt.h>			/* pulls in all headers */

#include <machine/stdarg.h>		/* for mpt_prt() */

static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
static void	mpt_timeout(void *);
static void	mpt_done(mpt_softc_t *, uint32_t);
static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);

static void	mpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	mpt_minphys(struct buf *);

void
mpt_scsipi_attach(mpt_softc_t *mpt)
{
	struct scsipi_adapter *adapt = &mpt->sc_adapter;
	struct scsipi_channel *chan = &mpt->sc_channel;
	int maxq;

	mpt->bus = 0;		/* XXX ?? */

	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

	/* Fill in the scsipi_adapter. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &mpt->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_openings = maxq;
	adapt->adapt_max_periph = maxq;
	adapt->adapt_request = mpt_scsipi_request;
	adapt->adapt_minphys = mpt_minphys;

	/* Fill in the scsipi_channel. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = 0;
	chan->chan_nluns = 8;
	chan->chan_ntargets = mpt->mpt_max_devices;
	chan->chan_id = mpt->mpt_ini_id;

	(void) config_found(&mpt->sc_dev, &mpt->sc_channel, scsiprint);
}

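/*
 * Allocate the driver's DMA-able memory: one page of reply frames and
 * MPT_REQ_MEM_SIZE(mpt) bytes of request frames.  Each request_t is
 * given an MPT_REQUEST_AREA-byte slice of the request region, and the
 * final MPT_SENSE_SIZE bytes of each slice double as that request's
 * autosense buffer, which is why sense_pbuf/sense_vbuf point at the
 * tail of the slice.
 */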
int
mpt_dma_mem_alloc(mpt_softc_t *mpt)
{
	bus_dma_segment_t reply_seg, request_seg;
	int reply_rseg, request_rseg;
	bus_addr_t pptr, end;
	char *vptr;
	size_t len;
	int error, i;

	/* Check if we have already allocated the reply memory. */
	if (mpt->reply != NULL)
		return (0);

	/*
	 * Allocate the request pool.  This isn't really DMA'd memory,
	 * but it's a convenient place to do it.
	 */
	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (mpt->request_pool == NULL) {
		aprint_error("%s: unable to allocate request pool\n",
		    mpt->sc_dev.dv_xname);
		return (ENOMEM);
	}

	/*
	 * Allocate DMA resources for reply buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    &reply_seg, 1, &reply_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_0;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
	if (error) {
		aprint_error("%s: unable to map reply area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_1;
	}

	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
	    0, 0, &mpt->reply_dmap);
	if (error) {
		aprint_error("%s: unable to create reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_2;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
	    PAGE_SIZE, NULL, 0);
	if (error) {
		aprint_error("%s: unable to load reply DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_3;
	}
	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

	/*
	 * Allocate DMA resources for request buffers.
	 */
	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
	if (error) {
		aprint_error("%s: unable to allocate request area, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_4;
	}

	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
	if (error) {
		aprint_error("%s: unable to map request area, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_5;
	}

	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
	if (error) {
		aprint_error("%s: unable to create request DMA map, "
		    "error = %d\n", mpt->sc_dev.dv_xname, error);
		goto fail_6;
	}

	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
	if (error) {
		aprint_error("%s: unable to load request DMA map, error = %d\n",
		    mpt->sc_dev.dv_xname, error);
		goto fail_7;
	}
	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

	pptr = mpt->request_phys;
	vptr = (void *) mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);

	for (i = 0; pptr < end; i++) {
		request_t *req = &mpt->request_pool[i];
		req->index = i;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
		if (error) {
			aprint_error("%s: unable to create req %d DMA map, "
			    "error = %d\n", mpt->sc_dev.dv_xname, i, error);
			goto fail_8;
		}
	}

	return (0);

 fail_8:
	for (--i; i >= 0; i--) {
		request_t *req = &mpt->request_pool[i];
		if (req->dmap != NULL)
			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
	}
	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
 fail_7:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
 fail_6:
	/* Unmap the full request area, not just one page. */
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request,
	    MPT_REQ_MEM_SIZE(mpt));
 fail_5:
	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
 fail_4:
	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
 fail_3:
	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
 fail_2:
	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
 fail_1:
	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
 fail_0:
	free(mpt->request_pool, M_DEVBUF);

	mpt->reply = NULL;
	mpt->request = NULL;
	mpt->request_pool = NULL;

	return (error);
}

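/*
 * Interrupt handler: drain the reply post FIFO, dispatching each reply
 * through mpt_done().  Returns nonzero if at least one reply was
 * handled, so shared-interrupt dispatch can tell whether this device
 * asserted the line.
 */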
int
mpt_intr(void *arg)
{
	mpt_softc_t *mpt = arg;
	int nrepl = 0;
	uint32_t reply;

	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
		return (0);

	reply = mpt_pop_reply_queue(mpt);
	while (reply != MPT_REPLY_EMPTY) {
		nrepl++;
		if (mpt->verbose > 1) {
			if ((reply & MPT_CONTEXT_REPLY) != 0) {
				/* Address reply; IOC has something to say */
				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
			} else {
				/* Context reply; all went well */
				mpt_prt(mpt, "context %u reply OK", reply);
			}
		}
		mpt_done(mpt, reply);
		reply = mpt_pop_reply_queue(mpt);
	}
	return (nrepl != 0);
}

void
mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", mpt->sc_dev.dv_xname);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	printf("\n");
}

static int
mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
{

	/* Timeouts are in msec, so we loop in 1000usec cycles */
	while (count) {
		mpt_intr(mpt);
		if (xs->xs_status & XS_STS_DONE)
			return (0);
		delay(1000);		/* only happens in boot, so ok */
		count--;
	}
	return (1);
}

static void
mpt_timeout(void *arg)
{
	request_t *req = arg;
	struct scsipi_xfer *xs = req->xfer;
	struct scsipi_periph *periph = xs->xs_periph;
	mpt_softc_t *mpt =
	    (void *) periph->periph_channel->chan_adapter->adapt_dev;
	uint32_t oseq;
	int s;

	scsipi_printaddr(periph);
	printf("command timeout\n");

	s = splbio();

	oseq = req->sequence;
	mpt->timeouts++;
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			mpt_prt(mpt, "recovered from command timeout");
			splx(s);
			return;
		}
	}
	mpt_prt(mpt,
	    "timeout on request index = 0x%x, seq = 0x%08x",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

	/* XXX WHAT IF THE IOC IS STILL USING IT?? */
	req->xfer = NULL;
	mpt_free_request(mpt, req);

	xs->error = XS_TIMEOUT;
	scsipi_done(xs);

	splx(s);
}

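/*
 * Note on the reply word: a "context reply" (MPT_CONTEXT_REPLY clear)
 * carries only the MsgContext of the completed request and signals
 * unqualified success.  An "address reply" (MPT_CONTEXT_REPLY set)
 * identifies a reply frame; the frame's physical address appears to be
 * stored shifted right by one, hence the (reply << 1) passed back to
 * mpt_free_reply() below.  MsgContext values with the high bit set
 * belong to driver control operations and are routed to mpt_ctlop().
 */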
static void
mpt_done(mpt_softc_t *mpt, uint32_t reply)
{
	struct scsipi_xfer *xs = NULL;
	struct scsipi_periph *periph;
	int index;
	request_t *req;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
		/* context reply (ok) */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		/* address reply (error) */

		/* XXX BUS_DMASYNC_POSTREAD XXX */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			uint32_t *pReply = (uint32_t *) mpt_reply;

			mpt_prt(mpt, "Address Reply (index %u):",
			    mpt_reply->MsgContext & 0xffff);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			mpt_prt(mpt, "%08x %08x %08x %08x",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set.
	 * This is most likely a notify message, so we try
	 * to process it, then free it.
	 */
	if (__predict_false((index & 0x80000000) != 0)) {
		if (mpt_reply != NULL)
			mpt_ctlop(mpt, mpt_reply, reply);
		else
			mpt_prt(mpt, "mpt_done: index 0x%x, NULL reply", index);
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
		mpt_prt(mpt, "mpt_done: invalid index (0x%x) in reply", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed. */
	if (__predict_false(req->index != index)) {
		mpt_prt(mpt, "mpt_done: corrupted request_t (0x%x)", index);
		return;
	}

	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	mpt_req = req->req_vbuf;

	/* Short cut for task management replies; nothing more for us to do. */
	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: TASK MGMT");
		goto done;
	}

	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
		goto done;

	/*
	 * At this point, it had better be a SCSI I/O command, but don't
	 * crash if it isn't.
	 */
	if (__predict_false(mpt_req->Function !=
	    MPI_FUNCTION_SCSI_IO_REQUEST)) {
		if (mpt->verbose > 1)
			mpt_prt(mpt, "mpt_done: unknown Function 0x%x (0x%x)",
			    mpt_req->Function, index);
		goto done;
	}

	/* Recover scsipi_xfer from the request structure. */
	xs = req->xfer;

	/* Can't have a SCSI command without a scsipi_xfer. */
	if (__predict_false(xs == NULL)) {
		mpt_prt(mpt,
		    "mpt_done: no scsipi_xfer, index = 0x%x, seq = 0x%08x",
		    req->index, req->sequence);
		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
		mpt_prt(mpt, "mpt_request:");
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			mpt_prt(mpt, "mpt_reply:");
			mpt_print_reply(mpt_reply);
		} else {
			mpt_prt(mpt, "context reply: 0x%08x", reply);
		}
		goto done;
	}

	callout_stop(&xs->xs_callout);

	periph = xs->xs_periph;

	/*
	 * If we were a data transfer, unload the map that described
	 * the data buffer.
	 */
	if (__predict_true(xs->datalen != 0)) {
		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
		    req->dmap->dm_mapsize,
		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
		    : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
	}

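	/*
	 * A context reply carries no status information at all; it is
	 * the IOC's way of saying the command completed cleanly.  An
	 * address reply must instead be decoded from the
	 * MSG_SCSI_IO_REPLY frame, as done below.
	 */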
	if (__predict_true(mpt_reply == NULL)) {
		/*
		 * Context reply; report that the command was
		 * successful!
		 *
		 * Also report the xfer mode, if necessary.
		 */
		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
		}
		xs->error = XS_NOERROR;
		xs->status = SCSI_OK;
		xs->resid = 0;
		mpt_free_request(mpt, req);
		scsipi_done(xs);
		return;
	}

	xs->status = mpt_reply->SCSIStatus;
	switch (mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes!  Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		xs->resid = xs->datalen - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState &
		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		/* FALLTHROUGH */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			/* Report the xfer mode, if necessary. */
			if ((mpt->mpt_report_xfer_mode &
			     (1 << periph->periph_target)) != 0)
				mpt_get_xfer_mode(mpt, periph);
			xs->resid = 0;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			scsipi_printaddr(periph);
			printf("invalid status code %d\n", xs->status);
			xs->error = XS_DRIVER_STUFFUP;
			break;
		}
		break;

	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		/* XXX What should we do here? */
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* XXX */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		/* XXX This is a bus-reset */
		xs->error = XS_DRIVER_STUFFUP;
		break;

	default:
		/* XXX unrecognized HBA error */
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
		memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
		    sizeof(xs->sense.scsi_sense));
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		/*
		 * This will cause the scsipi layer to issue
		 * a REQUEST SENSE.
		 */
		if (xs->status == SCSI_CHECK)
			xs->error = XS_BUSY;
	}

 done:
	/* If the IOC is done with this request, free it up. */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply, give the buffer back to the IOC. */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));

	if (xs != NULL)
		scsipi_done(xs);
}

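/*
 * Start a SCSI command: allocate a request, build the
 * MSG_SCSI_IO_REQUEST in its frame (tag type, transfer direction, CDB,
 * sense buffer address), load the data buffer into the request's DMA
 * map and describe it with a scatter/gather list, then hand the frame
 * to the IOC.  Completion arrives via mpt_intr()/mpt_done(); commands
 * marked XS_CTL_POLL are spun on with mpt_poll() instead.
 */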
static void
mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
{
	struct scsipi_periph *periph = xs->xs_periph;
	request_t *req;
	MSG_SCSI_IO_REQUEST *mpt_req;
	int error, s;

	s = splbio();
	req = mpt_get_request(mpt);
	if (__predict_false(req == NULL)) {
		/* This should happen very infrequently. */
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		splx(s);
		return;
	}
	splx(s);

	/* Link the req and the scsipi_xfer. */
	req->xfer = xs;

	/* Now we build the command for the IOC */
	mpt_req = req->req_vbuf;
	memset(mpt_req, 0, sizeof(*mpt_req));

	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
	mpt_req->Bus = mpt->bus;

	mpt_req->SenseBufferLength =
	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

	/*
	 * We use the message context to find the request structure when
	 * we get the command completion interrupt from the IOC.
	 */
	mpt_req->MsgContext = req->index;

	/* Which physical device to do the I/O on. */
	mpt_req->TargetID = periph->periph_target;
	mpt_req->LUN[1] = periph->periph_lun;

	/* Set the direction of the transfer. */
	if (xs->xs_control & XS_CTL_DATA_IN)
		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
	else if (xs->xs_control & XS_CTL_DATA_OUT)
		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
	else
		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

	/* Set the queue behavior. */
	if (__predict_true((!mpt->is_scsi) ||
			   (mpt->mpt_tag_enable &
			    (1 << periph->periph_target)))) {
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
			break;

#if 0	/* XXX */
		case XS_CTL_ACA_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
			break;
#endif

		case XS_CTL_ORDERED_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
			break;

		case XS_CTL_SIMPLE_TAG:
			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;

		default:
			if (mpt->is_scsi)
				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
			else
				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
			break;
		}
	} else
		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

	if (__predict_false(mpt->is_scsi &&
			    (mpt->mpt_disc_enable &
			     (1 << periph->periph_target)) == 0))
		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

	/* Copy the SCSI command block into place. */
	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

	mpt_req->CDBLength = xs->cmdlen;
	mpt_req->DataLength = xs->datalen;
	mpt_req->SenseBufferLowAddr = req->sense_pbuf;

	/*
	 * Map the DMA transfer.
	 */
	if (xs->datalen) {
		SGE_SIMPLE32 *se;

		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
		    xs->datalen, NULL,
		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
		     : BUS_DMA_WAITOK) |
		    BUS_DMA_STREAMING |
		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
		     : BUS_DMA_WRITE));
		switch (error) {
		case 0:
			break;

		case ENOMEM:
		case EAGAIN:
			xs->error = XS_RESOURCE_SHORTAGE;
			goto out_bad;

		default:
			xs->error = XS_DRIVER_STUFFUP;
			mpt_prt(mpt, "error %d loading DMA map", error);
 out_bad:
			s = splbio();
			mpt_free_request(mpt, req);
			scsipi_done(xs);
			splx(s);
			return;
		}

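		/*
		 * The request frame has room for MPT_NSGL_FIRST(mpt)
		 * scatter/gather elements inline.  If the transfer needs
		 * more, the last inline slot becomes a chain element
		 * pointing further into the request buffer, where
		 * additional runs of simple elements (and further chain
		 * elements, if necessary) are laid out; otherwise plain
		 * simple elements suffice.  The two branches below
		 * implement those cases.
		 */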
		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
			int seg, i, nleft = req->dmap->dm_nsegs;
			uint32_t flags;
			SGE_CHAIN32 *ce;

			seg = 0;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
			     i++, se++, seg++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[seg].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[seg].ds_len);
				tf = flags;
				if (i == MPT_NSGL_FIRST(mpt) - 2)
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft--;
			}

			/*
			 * Tell the IOC where to find the first chain element.
			 */
			mpt_req->ChainOffset =
			    ((char *)se - (char *)mpt_req) >> 2;

			/*
			 * Until we're finished with all segments...
			 */
			while (nleft) {
				int ntodo;

				/*
				 * Construct the chain element that points to
				 * the next segment.
				 */
				ce = (SGE_CHAIN32 *) se++;
				if (nleft > MPT_NSGL(mpt)) {
					ntodo = MPT_NSGL(mpt) - 1;
					ce->NextChainOffset = (MPT_RQSL(mpt) -
					    sizeof(SGE_SIMPLE32)) >> 2;
					ce->Length = MPT_NSGL(mpt)
					    * sizeof(SGE_SIMPLE32);
				} else {
					ntodo = nleft;
					ce->NextChainOffset = 0;
					ce->Length = ntodo
					    * sizeof(SGE_SIMPLE32);
				}
				ce->Address = req->req_pbuf +
				    ((char *)se - (char *)mpt_req);
				ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
				for (i = 0; i < ntodo; i++, se++, seg++) {
					uint32_t tf;

					memset(se, 0, sizeof(*se));
					se->Address =
					    req->dmap->dm_segs[seg].ds_addr;
					MPI_pSGE_SET_LENGTH(se,
					    req->dmap->dm_segs[seg].ds_len);
					tf = flags;
					if (i == ntodo - 1) {
						tf |=
						    MPI_SGE_FLAGS_LAST_ELEMENT;
						if (ce->NextChainOffset == 0) {
							tf |=
							    MPI_SGE_FLAGS_END_OF_LIST |
							    MPI_SGE_FLAGS_END_OF_BUFFER;
						}
					}
					MPI_pSGE_SET_FLAGS(se, tf);
					nleft--;
				}
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		} else {
			int i;
			uint32_t flags;

			mpt_req->DataLength = xs->datalen;
			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
			if (xs->xs_control & XS_CTL_DATA_OUT)
				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

			/* Copy the segments into our SG list. */
			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
			for (i = 0; i < req->dmap->dm_nsegs;
			     i++, se++) {
				uint32_t tf;

				memset(se, 0, sizeof(*se));
				se->Address = req->dmap->dm_segs[i].ds_addr;
				MPI_pSGE_SET_LENGTH(se,
				    req->dmap->dm_segs[i].ds_len);
				tf = flags;
				if (i == req->dmap->dm_nsegs - 1) {
					tf |=
					    MPI_SGE_FLAGS_LAST_ELEMENT |
					    MPI_SGE_FLAGS_END_OF_BUFFER |
					    MPI_SGE_FLAGS_END_OF_LIST;
				}
				MPI_pSGE_SET_FLAGS(se, tf);
			}
			bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
			    req->dmap->dm_mapsize,
			    (xs->xs_control & XS_CTL_DATA_IN) ?
			    BUS_DMASYNC_PREREAD
			    : BUS_DMASYNC_PREWRITE);
		}
	} else {
		/*
		 * No data to transfer; just make a single simple SGL
		 * with zero length.
		 */
		SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		memset(se, 0, sizeof(*se));
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		     MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);

	s = splbio();
	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		callout_reset(&xs->xs_callout,
		    mstohz(xs->timeout), mpt_timeout, req);
	mpt_send_cmd(mpt, req);
	splx(s);

	if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
		return;

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if (mpt_poll(mpt, xs, xs->timeout))
		mpt_timeout(req);
}

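/*
 * Set the transfer mode (wide, sync, tagged queueing) for a target by
 * rewriting SPI Device Page 1.  The sync period/offset ceiling comes
 * from the capabilities advertised in Port Page 0; DT, IU and QAS are
 * requested only for sync factors indicating Ultra160/Ultra320
 * capability.
 */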
static void
mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
{
	fCONFIG_PAGE_SCSI_DEVICE_1 tmp;

	if (!mpt->is_scsi) {
		/*
		 * SCSI transport settings don't make any sense for
		 * Fibre Channel; silently ignore the request.
		 */
		return;
	}

	/*
	 * Always allow disconnect; we don't have a way to disable
	 * it right now, in any case.
	 */
	mpt->mpt_disc_enable |= (1 << xm->xm_target);

	if (xm->xm_mode & PERIPH_CAP_TQING)
		mpt->mpt_tag_enable |= (1 << xm->xm_target);
	else
		mpt->mpt_tag_enable &= ~(1 << xm->xm_target);

	tmp = mpt->mpt_dev_page1[xm->xm_target];

	/*
	 * Set the wide/narrow parameter for the target.
	 */
	if (xm->xm_mode & PERIPH_CAP_WIDE16)
		tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
	else
		tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;

	/*
	 * Set the synchronous parameters for the target.
	 *
	 * XXX If we request sync transfers, we just go ahead and
	 * XXX request the maximum available.  We need finer control
	 * XXX in order to implement Domain Validation.
	 */
	tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
	    MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
	    MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
	    MPI_SCSIDEVPAGE1_RP_IU);
	if (xm->xm_mode & PERIPH_CAP_SYNC) {
		int factor, offset, np;

		factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
		offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
		np = 0;
		if (factor < 0x9) {
			/* Ultra320 */
			np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
		}
		if (factor < 0xa) {
			/* at least Ultra160 */
			np |= MPI_SCSIDEVPAGE1_RP_DT;
		}
		np |= (factor << 8) | (offset << 16);
		tmp.RequestedParameters |= np;
	}

	if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to write Device Page 1");
		return;
	}

	if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read back Device Page 1");
		return;
	}

	mpt->mpt_dev_page1[xm->xm_target] = tmp;
	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Target %d Page 1: RequestedParameters %x Config %x",
		    xm->xm_target,
		    mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
		    mpt->mpt_dev_page1[xm->xm_target].Configuration);
	}

	/*
	 * Make a note that we should perform an async callback at the
	 * end of the next successful command completion to report the
	 * negotiated transfer mode.
	 */
	mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
}

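/*
 * Read back SPI Device Page 0 to learn what was actually negotiated
 * with a target, and report it to the scsipi layer via an
 * ASYNC_EVENT_XFER_MODE event.  Called from mpt_done() after the first
 * successful command following a mode change.
 */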
static void
mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
{
	fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
	struct scsipi_xfer_mode xm;
	int period, offset;

	tmp = mpt->mpt_dev_page0[periph->periph_target];
	if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
		mpt_prt(mpt, "unable to read Device Page 0");
		return;
	}

	if (mpt->verbose > 1) {
		mpt_prt(mpt,
		    "SPI Tgt %d Page 0: NParms %x Information %x",
		    periph->periph_target,
		    tmp.NegotiatedParameters, tmp.Information);
	}

	xm.xm_target = periph->periph_target;
	xm.xm_mode = 0;

	if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
		xm.xm_mode |= PERIPH_CAP_WIDE16;

	period = (tmp.NegotiatedParameters >> 8) & 0xff;
	offset = (tmp.NegotiatedParameters >> 16) & 0xff;
	if (offset) {
		xm.xm_period = period;
		xm.xm_offset = offset;
		xm.xm_mode |= PERIPH_CAP_SYNC;
	}

	/*
	 * Tagged queueing is all controlled by us; there is no
	 * other setting to query.
	 */
	if (mpt->mpt_tag_enable & (1 << periph->periph_target))
		xm.xm_mode |= PERIPH_CAP_TQING;

	/*
	 * We're going to deliver the async event, so clear the marker.
	 */
	mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);

	scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
}

static void
mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
{
	MSG_DEFAULT_REPLY *dmsg = vmsg;

	switch (dmsg->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
		mpt_event_notify_reply(mpt, vmsg);
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_EVENT_ACK:
		mpt_free_reply(mpt, (reply << 1));
		break;

	case MPI_FUNCTION_PORT_ENABLE:
	    {
		MSG_PORT_ENABLE_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (mpt->verbose > 1)
			mpt_prt(mpt, "enable port reply index %d", index);
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
		}
		mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	case MPI_FUNCTION_CONFIG:
	    {
		MSG_CONFIG_REPLY *msg = vmsg;
		int index = msg->MsgContext & ~0x80000000;
		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
			request_t *req = &mpt->request_pool[index];
			req->debug = REQ_DONE;
			req->sequence = reply;
		} else
			mpt_free_reply(mpt, (reply << 1));
		break;
	    }

	default:
		mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
	}
}

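/*
 * Decode an asynchronous event notification from the IOC.  Most events
 * are simply logged; if the IOC sets AckRequired, an EVENT_ACK message
 * is posted back at the bottom of the function.
 */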
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{

	switch (msg->Event) {
	case MPI_EVENT_LOG_DATA:
	    {
		int i;

		/* Some error occurred that the Fusion wants logged. */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
		mpt_prt(mpt, "EvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++) {
			if ((i % 4) == 0)
				printf("%s:\t", mpt->sc_dev.dv_xname);
			printf("0x%08x%c", msg->Data[i],
			    ((i % 4) == 3) ? '\n' : ' ');
		}
		if ((i % 4) != 0)
			printf("\n");
		break;
	    }

	case MPI_EVENT_UNIT_ATTENTION:
		mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset. */
		mpt_prt(mpt, "IOC Bus Reset Port %d",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset. */
		mpt_prt(mpt, "External Bus Reset");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do.
		 */
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general, this means a device has been added
		 * to the loop.
		 */
		mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
		/* XXX Send an async event? */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		mpt_prt(mpt, "Port %d: Link state %s",
		    (msg->Data[1] >> 8) & 0xff,
		    (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			mpt_prt(mpt,
			    "Port %d: FC Link Event: LIP(%02x,%02x) "
			    "(Loop Initialization)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xf7:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice needs AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x doesn't "
					    "like FC performance",
					    msg->Data[0] & 0xff);
				break;

			case 0xf8:
				if ((msg->Data[0] & 0xff) == 0xf7)
					mpt_prt(mpt, "\tDevice detected loop "
					    "failure before acquiring AL_PA");
				else
					mpt_prt(mpt, "\tDevice %02x detected "
					    "loop failure",
					    msg->Data[0] & 0xff);
				break;

			default:
				mpt_prt(mpt, "\tDevice %02x requests that "
				    "device %02x reset itself",
				    msg->Data[0] & 0xff,
				    (msg->Data[0] >> 8) & 0xff);
				break;
			}
			break;

		case 0x02:
			mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
			    "(Loop Port Enable)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		case 0x03:
			mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
			    "(Loop Port Bypass)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;

		default:
			mpt_prt(mpt, "Port %d: FC Link Event: "
			    "Unknown event (%02x %02x %02x)",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 16) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0]     ) & 0xff);
			break;
		}
		break;

	case MPI_EVENT_LOGOUT:
		mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
		    (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
		break;

	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement of our
		 * mpt_send_event_request().
		 */
		break;

	case MPI_EVENT_SAS_PHY_LINK_STATUS:
		switch ((msg->Data[0] >> 12) & 0x0f) {
		case 0x00:
			mpt_prt(mpt, "Phy %d: Link Status Unknown",
			    msg->Data[0] & 0xff);
			break;
		case 0x01:
			mpt_prt(mpt, "Phy %d: Link Disabled",
			    msg->Data[0] & 0xff);
			break;
		case 0x02:
			mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
			    msg->Data[0] & 0xff);
			break;
		case 0x03:
			mpt_prt(mpt, "Phy %d: SATA OOB Complete",
			    msg->Data[0] & 0xff);
			break;
		case 0x08:
			mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
			    msg->Data[0] & 0xff);
			break;
		case 0x09:
			mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
			    msg->Data[0] & 0xff);
			break;
		default:
			mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
			    "Unknown event (%0x)",
			    msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
		}
		break;

	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
	case MPI_EVENT_SAS_DISCOVERY:
		/* ignore these events for now */
		break;

	default:
		mpt_prt(mpt, "Unknown async event: 0x%x", msg->Event);
		break;
	}

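	/*
	 * If the IOC flagged this event as requiring an acknowledgement,
	 * build an EVENT_ACK in a fresh request frame and send it.  The
	 * high bit set in MsgContext marks the eventual completion as a
	 * control operation, so mpt_done() routes it to mpt_ctlop(),
	 * which frees the reply.
	 */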
	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;

		if ((req = mpt_get_request(mpt)) == NULL) {
			/* XXX XXX XXX XXXJRT */
			panic("mpt_event_notify_reply: unable to allocate "
			    "request structure");
		}

		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		memset(ackp, 0, sizeof(*ackp));
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}

/* XXXJRT mpt_bus_reset() */

/*****************************************************************************
 * SCSI interface routines
 *****************************************************************************/

static void
mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	mpt_softc_t *mpt = (void *) adapt->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
		return;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Not supported. */
		return;

	case ADAPTER_REQ_SET_XFER_MODE:
		mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
		return;
	}
}

static void
mpt_minphys(struct buf *bp)
{

/*
 * Subtract one from the SGL limit, since we need an extra one to handle
 * a non-page-aligned transfer.
 */
#define	MPT_MAX_XFER	((MPT_SGL_MAX - 1) * PAGE_SIZE)

	if (bp->b_bcount > MPT_MAX_XFER)
		bp->b_bcount = MPT_MAX_XFER;
	minphys(bp);
}