
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
65 #include <dev/pci/pcivar.h>
67 #include <cam/cam.h>
68 #include <cam/scsi/scsi_all.h>
121 /* Added this union to smoothly convert le64toh cm->cm_desc.Words.
135 /* Rate limit chain-fail messages to 1 per minute */
164 if (curthread->td_no_sleeping != 0)
176 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
177 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
210 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
211 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
281 sleep_flags = (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
308 "control of another peer host, aborting "
389 prireqcr = MAX(1, sc->max_prireqframes);
390 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);
392 reqcr = MAX(2, sc->max_reqframes);
393 reqcr = MIN(reqcr, sc->facts->RequestCredit);
395 sc->num_reqs = prireqcr + reqcr;
396 sc->num_prireqs = prireqcr;
397 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
398 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
401 sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;
405 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
412 sges_per_frame = sc->reqframesz / sizeof(MPI2_SGE_SIMPLE64) - 1;
413 maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
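A minimal stand-alone sketch of the sizing arithmetic above. The frame size, SGE size, and chain depth are hypothetical example values, not real IOC facts, and PAGE_SIZE is assumed to be 4 KiB.

#include <stdio.h>

int
main(void)
{
	const unsigned page_size = 4096;	/* assumed PAGE_SIZE */
	const unsigned reqframesz = 128;	/* hypothetical IOCRequestFrameSize * 4 */
	const unsigned sge64_size = 12;		/* sizeof(MPI2_SGE_SIMPLE64): 4-byte FlagsLength + 8-byte address */
	const unsigned max_chain_depth = 128;	/* hypothetical facts->MaxChainDepth */

	/* One SGE slot per frame is reserved for the chain element. */
	unsigned sges_per_frame = reqframesz / sge64_size - 1;
	unsigned maxio = (sges_per_frame * max_chain_depth + 1) * page_size;

	printf("sges_per_frame=%u, maxio=%u bytes\n", sges_per_frame, maxio);
	return (0);
}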
416 * If an I/O size limitation was requested, use it and pass it up to CAM.
419 if (sc->max_io_pages > 0) {
420 maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
421 sc->maxio = maxio;
423 sc->maxio = maxio;
427 sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
429 if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
430 sc->num_chains = sc->max_chains;
433 * Figure out the number of MSIx-based queues. If the firmware or
435 * the queues to be useful then don't enable multi-queue.
437 if (sc->facts->MaxMSIxVectors < 2)
438 sc->msi_msgs = 1;
440 if (sc->msi_msgs > 1) {
441 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
442 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
443 if (sc->num_reqs / sc->msi_msgs < 2)
444 sc->msi_msgs = 1;
448 sc->msi_msgs, sc->num_reqs, sc->num_replies);
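A hedged illustration of the queue-count clamping above, in plain C; pick_msix_queues() and its parameters are invented for the example and simply mirror the checks against mp_ncpus, MaxMSIxVectors, and the request count.

/* Sketch: never use more interrupt queues than CPUs or MSI-X vectors, and
 * fall back to a single queue when there are too few requests per queue
 * for multi-queue to be useful. */
unsigned
pick_msix_queues(unsigned requested, unsigned ncpus, unsigned max_vectors,
    unsigned num_reqs)
{
	unsigned msgs = requested;

	if (max_vectors < 2)
		return (1);
	if (msgs > ncpus)
		msgs = ncpus;
	if (msgs > max_vectors)
		msgs = max_vectors;
	if (num_reqs / msgs < 2)
		msgs = 1;
	return (msgs);
}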
452 * This is called during attach and when re-initializing due to a Diag Reset.
454 * If called from attach, de-allocation is not required because the driver has
456 * allocated structures based on IOC Facts will need to be freed and re-
470 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
475 * a re-initialization and only return the error if attaching so the OS
478 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) {
489 MPS_DPRINT_PAGE(sc, MPS_XINFO, iocfacts, sc->facts);
491 snprintf(sc->fw_version, sizeof(sc->fw_version),
493 sc->facts->FWVersion.Struct.Major,
494 sc->facts->FWVersion.Struct.Minor,
495 sc->facts->FWVersion.Struct.Unit,
496 sc->facts->FWVersion.Struct.Dev);
498 snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d",
499 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
501 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >>
504 mps_dprint(sc, MPS_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
507 sc->facts->IOCCapabilities,
519 if (attaching && ((sc->facts->IOCCapabilities &
536 saved_mode = sc->ir_firmware;
537 if (sc->facts->IOCCapabilities &
539 sc->ir_firmware = 1;
541 if (sc->ir_firmware != saved_mode) {
549 sc->mps_flags &= ~MPS_FLAGS_REALLOCATED;
552 ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
553 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
554 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
555 (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
556 (saved_facts.ProductID != sc->facts->ProductID) ||
557 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
559 sc->facts->IOCRequestFrameSize) ||
560 (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
561 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
562 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
563 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
565 sc->facts->MaxReplyDescriptorPostQueueDepth) ||
566 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
567 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
569 sc->facts->MaxPersistentEntries))) {
573 sc->mps_flags |= MPS_FLAGS_REALLOCATED;
577 * Some things should be done if attaching or re-allocating after a Diag
586 if (sc->facts->IOCCapabilities &
588 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
590 if (sc->facts->IOCCapabilities &
592 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
594 if (sc->facts->IOCCapabilities &
596 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
602 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
603 sc->eedp_enabled = TRUE;
604 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
605 sc->control_TLR = TRUE;
612 TAILQ_INIT(&sc->req_list);
613 TAILQ_INIT(&sc->high_priority_req_list);
614 TAILQ_INIT(&sc->chain_list);
615 TAILQ_INIT(&sc->tm_list);
657 bzero(sc->free_queue, sc->fqdepth * 4);
683 sc->replypostindex = 0;
684 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
710 * XXX If the number of MSI-X vectors changes during re-init, this
724 sc->WD_available = FALSE;
725 if (pci_get_device(sc->mps_dev) == MPI2_MFGPAGE_DEVID_SSS6200)
726 sc->WD_available = TRUE;
743 if (sc->free_busaddr != 0)
744 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
745 if (sc->free_queue != NULL)
746 bus_dmamem_free(sc->queues_dmat, sc->free_queue,
747 sc->queues_map);
748 if (sc->queues_dmat != NULL)
749 bus_dma_tag_destroy(sc->queues_dmat);
751 if (sc->chain_frames != NULL) {
752 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
753 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
754 sc->chain_map);
756 if (sc->chain_dmat != NULL)
757 bus_dma_tag_destroy(sc->chain_dmat);
759 if (sc->sense_busaddr != 0)
760 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
761 if (sc->sense_frames != NULL)
762 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
763 sc->sense_map);
764 if (sc->sense_dmat != NULL)
765 bus_dma_tag_destroy(sc->sense_dmat);
767 if (sc->reply_busaddr != 0)
768 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
769 if (sc->reply_frames != NULL)
770 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
771 sc->reply_map);
772 if (sc->reply_dmat != NULL)
773 bus_dma_tag_destroy(sc->reply_dmat);
775 if (sc->req_busaddr != 0)
776 bus_dmamap_unload(sc->req_dmat, sc->req_map);
777 if (sc->req_frames != NULL)
778 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
779 if (sc->req_dmat != NULL)
780 bus_dma_tag_destroy(sc->req_dmat);
782 if (sc->chains != NULL)
783 free(sc->chains, M_MPT2);
784 if (sc->commands != NULL) {
785 for (i = 1; i < sc->num_reqs; i++) {
786 cm = &sc->commands[i];
787 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
789 free(sc->commands, M_MPT2);
791 if (sc->buffer_dmat != NULL)
792 bus_dma_tag_destroy(sc->buffer_dmat);
795 free(sc->queues, M_MPT2);
796 sc->queues = NULL;
813 sassc = sc->sassc;
817 mtx_assert(&sc->mps_mtx, MA_OWNED);
820 if (sc->mps_flags & MPS_FLAGS_DIAGRESET) {
828 sc->mps_flags |= MPS_FLAGS_DIAGRESET;
843 /* Restore the PCI state, including the MSI-X registers */
861 * Mapping structures will be re-allocated after getting IOC Page8, so
874 sc->mps_flags &= ~MPS_FLAGS_DIAGRESET;
878 * Some mapping info is based in IOC Page8 data, so re-initialize the
891 sc, sc->replypostindex, sc->replyfreeindex);
934 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
935 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
942 } while (--cntdn);
976 if (curthread->td_no_sleeping != 0)
1008 /* Clock out the message data synchronously in 32-bit dwords */
1020 /* Clock in the reply in 16-bit words. The total length of the
1041 ioc_sz = reply->MsgLength;
1051 residual = ioc_sz * 2 - count;
1072 while (residual--) {
1100 cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
1102 if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE && !(sc->mps_flags & MPS_FLAGS_SHUTDOWN))
1103 mtx_assert(&sc->mps_mtx, MA_OWNED);
1105 if (++sc->io_cmds_active > sc->io_cmds_highwater)
1106 sc->io_cmds_highwater++;
1107 rd.u.low = cm->cm_desc.Words.Low;
1108 rd.u.high = cm->cm_desc.Words.High;
1111 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
1112 ("command not busy, state = %u\n", cm->cm_state));
1113 cm->cm_state = MPS_CM_STATE_INQUEUE;
1115 /* TODO: we may need to make the register write below atomic */
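A sketch of the union referenced in the comment near the top of the listing ("Added this union to smoothly convert le64toh cm->cm_desc.Words"). The type and function names here are illustrative, and <sys/endian.h>/le64toh() is assumed as on FreeBSD.

#include <stdint.h>
#include <sys/endian.h>		/* le64toh() on FreeBSD */

/* Sketch: view a 64-bit request descriptor as two 32-bit words so the whole
 * value can be converted with le64toh() and then posted as two 32-bit
 * register writes (low word, then high word). */
union request_desc {
	uint64_t word;
	struct {
		uint32_t low;
		uint32_t high;
	} u;
};

uint64_t
desc_to_host(uint32_t desc_low, uint32_t desc_high)
{
	union request_desc rd;

	rd.u.low = desc_low;
	rd.u.high = desc_high;
	return (le64toh(rd.word));
}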
1171 if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
1172 || (sc->replyframesz == 0)) {
1185 * deliberately in the lower 32 bits of memory. This is a micro-
1186 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
1192 init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
1193 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
1194 init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
1198 init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr);
1200 init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr);
1202 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
1247 mps_lock(ctx->softc);
1248 ctx->error = error;
1249 ctx->completed = 1;
1250 if ((error == 0) && (ctx->abandoned == 0)) {
1251 *ctx->addr = segs[0].ds_addr;
1255 if (ctx->abandoned != 0)
1261 mps_unlock(ctx->softc);
1264 bus_dmamap_unload(ctx->buffer_dmat,
1265 ctx->buffer_dmamap);
1266 *ctx->addr = 0;
1279 nq = sc->msi_msgs;
1282 sc->queues = malloc(sizeof(struct mps_queue) * nq, M_MPT2,
1284 if (sc->queues == NULL)
1288 q = &sc->queues[i];
1290 q->sc = sc;
1291 q->qnum = i;
1312 * contains filled-in reply frames sent from the firmware to the host.
1316 sc->fqdepth = roundup2(sc->num_replies + 1, 16);
1317 sc->pqdepth = roundup2(sc->num_replies + 1, 16);
1318 fqsize = sc->fqdepth * 4;
1319 pqsize = sc->pqdepth * 8;
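For illustration, the rounding and entry sizes used above; ROUNDUP2 here is a local stand-in for the kernel's roundup2(), which requires a power-of-two alignment. Free-queue entries are 32-bit reply-frame bus addresses (hence the * 4) and post-queue descriptors are 64 bits (hence the * 8).

#define ROUNDUP2(x, align)	(((x) + (align) - 1) & ~((align) - 1))

/* Example with a hypothetical num_replies of 1024: */
unsigned example_fqdepth = ROUNDUP2(1024 + 1, 16);		/* 1040 entries */
unsigned example_fqsize  = ROUNDUP2(1024 + 1, 16) * 4;		/* free queue, bytes */
unsigned example_pqsize  = ROUNDUP2(1024 + 1, 16) * 8;		/* post queue, bytes */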
1322 bus_dma_template_init(&t, sc->mps_parent_dmat);
1326 if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
1330 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
1331 &sc->queues_map)) {
1336 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
1339 sc->free_queue = (uint32_t *)queues;
1340 sc->free_busaddr = queues_busaddr;
1341 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
1342 sc->post_busaddr = queues_busaddr + fqsize;
1344 (uintmax_t)sc->free_busaddr, fqsize);
1346 (uintmax_t)sc->post_busaddr, pqsize);
1358 sc->replyframesz = sc->facts->ReplyFrameSize * 4;
1361 * sc->num_replies should be one less than sc->fqdepth. We need to
1362 * allocate space for sc->fqdepth replies, but only sc->num_replies
1365 num_replies = max(sc->fqdepth, sc->num_replies);
1367 rsize = sc->replyframesz * num_replies;
1368 bus_dma_template_init(&t, sc->mps_parent_dmat);
1372 if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
1376 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
1377 BUS_DMA_NOWAIT, &sc->reply_map)) {
1381 bzero(sc->reply_frames, rsize);
1382 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
1383 mps_memaddr_cb, &sc->reply_busaddr, 0);
1386 (uintmax_t)sc->reply_busaddr, rsize);
1403 KASSERT(segs[s].ds_addr + segs[s].ds_len - 1 <= BUS_SPACE_MAXADDR_32BIT,
1406 for (bo = 0; bo + sc->reqframesz <= segs[s].ds_len;
1407 bo += sc->reqframesz) {
1408 chain = &sc->chains[i++];
1409 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + o);
1410 chain->chain_busaddr = segs[s].ds_addr + bo;
1411 o += sc->reqframesz;
1415 o += segs[s].ds_len - bo;
1417 sc->chain_free_lowwater = i;
1427 rsize = sc->reqframesz * sc->num_reqs;
1428 bus_dma_template_init(&t, sc->mps_parent_dmat);
1432 if (bus_dma_template_tag(&t, &sc->req_dmat)) {
1436 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
1437 BUS_DMA_NOWAIT, &sc->req_map)) {
1441 bzero(sc->req_frames, rsize);
1442 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
1443 mps_memaddr_cb, &sc->req_busaddr, 0);
1445 (uintmax_t)sc->req_busaddr, rsize);
1447 sc->chains = malloc(sizeof(struct mps_chain) * sc->num_chains, M_MPT2,
1449 if (!sc->chains) {
1453 rsize = sc->reqframesz * sc->num_chains;
1454 bus_dma_template_clone(&t, sc->req_dmat);
1458 if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
1462 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
1463 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
1467 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
1470 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
1471 sc->chain_map);
1475 rsize = MPS_SENSE_LEN * sc->num_reqs;
1476 bus_dma_template_clone(&t, sc->req_dmat);
1479 if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
1483 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
1484 BUS_DMA_NOWAIT, &sc->sense_map)) {
1488 bzero(sc->sense_frames, rsize);
1489 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
1490 mps_memaddr_cb, &sc->sense_busaddr, 0);
1492 (uintmax_t)sc->sense_busaddr, rsize);
1494 nsegs = (sc->maxio / PAGE_SIZE) + 1;
1495 bus_dma_template_init(&t, sc->mps_parent_dmat);
1499 BD_LOCKFUNCARG(&sc->mps_mtx),
1501 if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
1510 sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs,
1512 for (i = 1; i < sc->num_reqs; i++) {
1513 cm = &sc->commands[i];
1514 cm->cm_req = sc->req_frames + i * sc->reqframesz;
1515 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
1516 cm->cm_sense = &sc->sense_frames[i];
1517 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
1518 cm->cm_desc.Default.SMID = i;
1519 cm->cm_sc = sc;
1520 cm->cm_state = MPS_CM_STATE_BUSY;
1521 TAILQ_INIT(&cm->cm_chain_list);
1522 callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0);
1525 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
1526 if (i <= sc->num_prireqs)
1532 sc->num_reqs = i;
1545 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
1549 * have space for on the queue. So sc->num_replies (the number we
1550 * use) should be less than sc->fqdepth (allocated size).
1552 if (sc->num_replies >= sc->fqdepth)
1558 for (i = 0; i < sc->fqdepth; i++)
1559 sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
1560 sc->replyfreeindex = sc->num_replies;
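A user-space model of the free-queue seeding above, assuming 32-bit bus addresses: every reply frame's address is written into the queue, frame i living at reply_busaddr + i * replyframesz.

#include <stdint.h>

void
seed_free_queue(uint32_t *free_queue, unsigned fqdepth,
    uint32_t reply_busaddr, unsigned replyframesz)
{
	for (unsigned i = 0; i < fqdepth; i++)
		free_queue[i] = reply_busaddr + i * replyframesz;
}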
1566 * Next are the global settings, if they exist. Highest are the per-unit
1575 sc->mps_debug = MPS_INFO|MPS_FAULT;
1576 sc->disable_msix = 0;
1577 sc->disable_msi = 0;
1578 sc->max_msix = MPS_MSIX_MAX;
1579 sc->max_chains = MPS_CHAIN_FRAMES;
1580 sc->max_io_pages = MPS_MAXIO_PAGES;
1581 sc->enable_ssu = MPS_SSU_ENABLE_SSD_DISABLE_HDD;
1582 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
1583 sc->use_phynum = 1;
1584 sc->max_reqframes = MPS_REQ_FRAMES;
1585 sc->max_prireqframes = MPS_PRI_REQ_FRAMES;
1586 sc->max_replyframes = MPS_REPLY_FRAMES;
1587 sc->max_evtframes = MPS_EVT_REPLY_FRAMES;
1595 TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix);
1596 TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi);
1597 TUNABLE_INT_FETCH("hw.mps.max_msix", &sc->max_msix);
1598 TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains);
1599 TUNABLE_INT_FETCH("hw.mps.max_io_pages", &sc->max_io_pages);
1600 TUNABLE_INT_FETCH("hw.mps.enable_ssu", &sc->enable_ssu);
1601 TUNABLE_INT_FETCH("hw.mps.spinup_wait_time", &sc->spinup_wait_time);
1602 TUNABLE_INT_FETCH("hw.mps.use_phy_num", &sc->use_phynum);
1603 TUNABLE_INT_FETCH("hw.mps.max_reqframes", &sc->max_reqframes);
1604 TUNABLE_INT_FETCH("hw.mps.max_prireqframes", &sc->max_prireqframes);
1605 TUNABLE_INT_FETCH("hw.mps.max_replyframes", &sc->max_replyframes);
1606 TUNABLE_INT_FETCH("hw.mps.max_evtframes", &sc->max_evtframes);
1608 /* Grab the unit-instance variables */
1610 device_get_unit(sc->mps_dev));
1616 device_get_unit(sc->mps_dev));
1617 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
1620 device_get_unit(sc->mps_dev));
1621 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
1624 device_get_unit(sc->mps_dev));
1625 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);
1628 device_get_unit(sc->mps_dev));
1629 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
1632 device_get_unit(sc->mps_dev));
1633 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
1635 bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
1637 device_get_unit(sc->mps_dev));
1638 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
1641 device_get_unit(sc->mps_dev));
1642 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
1645 device_get_unit(sc->mps_dev));
1646 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
1649 device_get_unit(sc->mps_dev));
1650 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
1653 device_get_unit(sc->mps_dev));
1654 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
1657 device_get_unit(sc->mps_dev));
1658 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
1661 device_get_unit(sc->mps_dev));
1662 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
1665 device_get_unit(sc->mps_dev));
1666 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
1682 device_get_unit(sc->mps_dev));
1683 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
1685 sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev);
1687 sysctl_tree = device_get_sysctl_tree(sc->mps_dev);
1690 sysctl_ctx_init(&sc->sysctl_ctx);
1691 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1694 if (sc->sysctl_tree == NULL)
1696 sysctl_ctx = &sc->sysctl_ctx;
1697 sysctl_tree = sc->sysctl_tree;
1705 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
1706 "Disable the use of MSI-X interrupts");
1709 OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
1713 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
1714 "User-defined maximum number of MSIX queues");
1717 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
1721 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
1725 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
1729 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
1733 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
1737 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version,
1738 strlen(sc->fw_version), "firmware version");
1745 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version,
1746 strlen(sc->msg_version), "message interface version (deprecated)");
1750 &sc->io_cmds_active, 0, "number of currently active commands");
1754 &sc->io_cmds_highwater, 0, "maximum active commands seen");
1758 &sc->chain_free, 0, "number of free chain elements");
1762 &sc->chain_free_lowwater, 0, "lowest number of free chain elements");
1766 &sc->max_chains, 0, "maximum chain frames that will be allocated");
1770 &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use 
1774 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
1779 &sc->chain_alloc_fail, "chain allocation failures");
1783 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
1803 &sc->dump_reqs_alltypes, 0,
1807 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
1851 debug = sc->mps_debug;
1858 if (debug & string->flag)
1859 sbuf_printf(sbuf, ",%s", string->name);
1865 if (error || req->newptr == NULL)
1868 len = req->newlen - req->newidx;
1896 } else if (*list == '-') {
1915 if (strcasecmp(token, string->name) == 0) {
1916 flags |= string->flag;
1924 sc->mps_debug = flags;
1927 sc->mps_debug |= flags;
1930 sc->mps_debug &= (~flags);
1965 numreqs = sc->num_reqs;
1967 if (req->newptr != NULL)
1970 if (smid == 0 || smid > sc->num_reqs)
1972 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs))
1973 numreqs = sc->num_reqs;
1978 cm = &sc->commands[i];
1979 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state))
1982 hdr.state = cm->cm_state;
1984 hdr.deschi = cm->cm_desc.Words.High;
1985 hdr.desclo = cm->cm_desc.Words.Low;
1986 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
1990 sbuf_bcat(sb, cm->cm_req, 128);
1991 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
1993 sbuf_bcat(sb, chain->chain, 128);
2009 mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
2010 callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
2011 callout_init_mtx(&sc->device_check_callout, &sc->mps_mtx, 0);
2012 TAILQ_INIT(&sc->event_list);
2013 timevalclear(&sc->lastfail);
2021 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
2023 if (!sc->facts) {
2031 * A Diag Reset will also call mps_iocfacts_allocate and re-read the IOC
2047 * rest of the initialization process. The CAM/SAS module will
2050 sc->mps_ich.ich_func = mps_startup;
2051 sc->mps_ich.ich_arg = sc;
2052 if (config_intrhook_establish(&sc->mps_ich) != 0) {
2061 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
2064 if (sc->shutdown_eh == NULL)
2070 sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
2076 /* Run through any late-start handlers. */
2095 config_intrhook_disestablish(&sc->mps_ich);
2096 sc->mps_ich.ich_arg = NULL;
2109 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
2118 callout_reset_sbt(&sc->periodic, MPS_PERIODIC_DELAY * SBT_1S, 0,
2128 MPS_DPRINT_EVENT(sc, generic, event);
2130 switch (event->Event) {
2133 if (sc->mps_debug & MPS_EVENT)
2134 hexdump(event->EventData, event->EventDataLength, NULL, 0);
2137 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
2139 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
2140 entry->LogSequence);
2158 &sc->mps_log_eh);
2167 if (sc->mps_log_eh != NULL)
2168 mps_deregister_events(sc, sc->mps_log_eh);
2184 sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
2187 callout_drain(&sc->periodic);
2188 callout_drain(&sc->device_check_callout);
2207 if (sc->facts != NULL)
2208 free(sc->facts, M_MPT2);
2216 if (sc->sysctl_tree != NULL)
2217 sysctl_ctx_free(&sc->sysctl_ctx);
2220 if (sc->shutdown_eh != NULL)
2221 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
2223 mtx_destroy(&sc->mps_mtx);
2239 KASSERT(cm->cm_state == MPS_CM_STATE_INQUEUE,
2240 ("command not inqueue, state = %u\n", cm->cm_state));
2241 cm->cm_state = MPS_CM_STATE_BUSY;
2242 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
2243 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
2245 if (cm->cm_complete != NULL) {
2248 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2249 cm->cm_reply);
2250 cm->cm_complete(sc, cm);
2253 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
2258 if (cm->cm_sc->io_cmds_active != 0) {
2259 cm->cm_sc->io_cmds_active--;
2262 "out of sync - resynching to 0\n");
2319 sc_status = le16toh(mpi_reply->IOCStatus);
2321 mps_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
2334 * needed for both INTx interrupts and driver-driven polling
2380 pq = sc->replypostindex;
2383 __func__, sc, sc->replypostindex);
2387 desc = &sc->post_queue[sc->replypostindex];
2399 flags = desc->Default.ReplyFlags &
2402 || (le32toh(desc->Words.High) == 0xffffffff))
2412 if (++sc->replypostindex >= sc->pqdepth)
2413 sc->replypostindex = 0;
2417 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2418 cm->cm_reply = NULL;
2426 * Re-compose the reply address from the address
2430 * host format, and then use that to provide the
2432 * (sc->reply_frames).
2434 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2435 reply = sc->reply_frames +
2436 (baddr - ((uint32_t)sc->reply_busaddr));
2443 if ((reply < sc->reply_frames)
2444 || (reply > (sc->reply_frames +
2445 (sc->fqdepth * sc->replyframesz)))) {
2450 sc->reply_frames, sc->fqdepth,
2451 sc->replyframesz);
2453 /* LSI TODO: see the Linux code for a graceful exit */
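A simplified model of the address recomposition and range check above; types are flattened to plain pointers and a 32-bit bus address, and the function name is invented for the sketch.

#include <stddef.h>
#include <stdint.h>

uint8_t *
reply_addr(uint8_t *reply_frames, uint32_t reply_busaddr, unsigned fqdepth,
    unsigned replyframesz, uint32_t baddr)
{
	/* The descriptor carries only the low 32 bits of the reply frame's
	 * bus address; recover the host pointer by offsetting from the
	 * start of the reply pool, then sanity check the result. */
	uint8_t *reply = reply_frames + (baddr - reply_busaddr);

	if (reply < reply_frames ||
	    reply > reply_frames + (size_t)fqdepth * replyframesz)
		return (NULL);		/* bogus address: reject the reply */
	return (reply);
}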
2456 if (le16toh(desc->AddressReply.SMID) == 0) {
2457 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2468 if ((le16toh(rel_rep->IOCStatus) &
2473 &sc->fw_diag_buffer_list[
2474 rel_rep->BufferType];
2475 pBuffer->valid_data = TRUE;
2476 pBuffer->owned_by_firmware =
2478 pBuffer->immediate = FALSE;
2490 cm = &sc->commands[
2491 le16toh(desc->AddressReply.SMID)];
2492 if (cm->cm_state == MPS_CM_STATE_INQUEUE) {
2493 cm->cm_reply = reply;
2494 cm->cm_reply_data = le32toh(
2495 desc->AddressReply.ReplyFrameAddress);
2500 cm->cm_state, cm);
2511 desc->Default.ReplyFlags);
2519 if (cm->cm_reply)
2520 mps_display_reply_info(sc, cm->cm_reply);
2525 if (pq != sc->replypostindex) {
2527 __func__, sc, sc->replypostindex);
2529 sc->replypostindex);
2542 event = le16toh(reply->Event);
2543 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2544 if (isset(eh->mask, event)) {
2545 eh->callback(sc, data, reply);
2566 if (cm->cm_reply)
2567 MPS_DPRINT_EVENT(sc, generic,
2568 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2589 eh->callback = cb;
2590 eh->data = data;
2591 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
2611 bcopy(mask, &handle->mask[0], sizeof(u32) *
2615 sc->event_mask[i] = -1;
2618 sc->event_mask[i] &= ~handle->mask[i];
2622 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2623 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2624 evtreq->MsgFlags = 0;
2625 evtreq->SASBroadcastPrimitiveMasks = 0;
2630 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
2635 evtreq->EventMasks[i] =
2636 htole32(sc->event_mask[i]);
2638 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2639 cm->cm_data = NULL;
2643 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2645 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
2649 MPS_DPRINT_EVENT(sc, generic, reply);
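A sketch of the event-mask convention used above: in the MPI event-notification request a set bit suppresses an event, so the driver starts with all bits set and clears only the bits that registered handlers asked for. The word count and function below are illustrative.

#include <stdint.h>

#define EVENT_MASK_WORDS	4	/* stand-in for MPI2_EVENT_NOTIFY_EVENTMASK_WORDS */

void
build_event_mask(uint32_t out[EVENT_MASK_WORDS],
    const uint32_t wanted[EVENT_MASK_WORDS])
{
	for (int i = 0; i < EVENT_MASK_WORDS; i++)
		out[i] = 0xffffffffu & ~wanted[i];	/* 1 = event masked off */
}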
2671 sc->event_mask[i] = -1;
2673 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2675 sc->event_mask[i] &= ~eh->mask[i];
2680 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2681 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2682 evtreq->MsgFlags = 0;
2683 evtreq->SASBroadcastPrimitiveMasks = 0;
2688 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
2693 evtreq->EventMasks[i] =
2694 htole32(sc->event_mask[i]);
2696 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2697 cm->cm_data = NULL;
2698 cm->cm_complete = mps_reregister_events_complete;
2711 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2726 if (cm->cm_sglsize < MPS_SGC_SIZE)
2729 chain = mps_alloc_chain(cm->cm_sc);
2733 space = cm->cm_sc->reqframesz;
2736 * Note: a doubly-linked list is used to make it easier to
2739 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
2741 sgc = (MPI2_SGE_CHAIN64 *)&cm->cm_sge->MpiChain;
2742 sgc->Length = htole16(space);
2743 sgc->NextChainOffset = 0;
2744 /* TODO: this looks like a bug in setting sgc->Flags.
2745 * sgc->Flags = ( MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2750 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
2751 sgc->Address.High = htole32(chain->chain_busaddr >> 32);
2752 sgc->Address.Low = htole32(chain->chain_busaddr);
2754 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
2755 cm->cm_sglsize = space;
2760 * Add one scatter-gather element (chain, simple, transaction context)
2761 * to the scatter-gather list for a command. Maintain cm_sglsize and
2773 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
2778 if (len != tc->DetailsLength + 4)
2780 tc->DetailsLength + 4, len);
2784 /* Driver only uses 64-bit chain elements */
2790 /* Driver only uses 64-bit SGE simple elements */
2794 if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) &
2796 panic("SGE simple %p not marked 64-bit?", sge);
2800 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
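A rough sketch of the 64-bit simple SGE layout the checks above assume: flags in the top byte of FlagsLength, length in the low 24 bits, and the address carried as two 32-bit halves. The struct and constants are illustrative, not the MPI headers.

#include <stdint.h>

struct sge_simple64 {
	uint32_t flags_length;	/* flags << 24 | length in bytes */
	uint32_t address_low;
	uint32_t address_high;
};

void
fill_sge64(struct sge_simple64 *sge, uint64_t busaddr, uint32_t len,
    uint8_t flags)
{
	sge->flags_length = ((uint32_t)flags << 24) | (len & 0x00ffffffu);
	sge->address_low = (uint32_t)busaddr;
	sge->address_high = (uint32_t)(busaddr >> 32);
}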
2816 if (cm->cm_sglsize < MPS_SGC_SIZE)
2819 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
2831 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
2839 sge->FlagsLength |= htole32(
2848 cm->cm_sglsize -= len;
2849 bcopy(sgep, cm->cm_sge, len);
2850 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
2856 if (segsleft == 1 && cm->cm_sglsize < len)
2858 cm->cm_sglsize, len);
2861 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
2863 cm->cm_sglsize, len);
2868 * If this is a bi-directional request, need to account for that
2869 * here. Save the pre-filled sge values. These will be used
2871 * cm_out_len is non-zero, this is a bi-directional request, so
2874 * 2 SGL's for a bi-directional request, they both use the same
2877 saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF;
2878 saved_address_low = sge->Address.Low;
2879 saved_address_high = sge->Address.High;
2880 if (cm->cm_out_len) {
2881 sge->FlagsLength = htole32(cm->cm_out_len |
2887 cm->cm_sglsize -= len;
2888 bcopy(sgep, cm->cm_sge, len);
2889 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge
2899 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
2908 sge->FlagsLength = htole32(saved_buf_len);
2909 sge->Address.Low = saved_address_low;
2910 sge->Address.High = saved_address_high;
2913 cm->cm_sglsize -= len;
2914 bcopy(sgep, cm->cm_sge, len);
2915 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
2920 * Add one dma segment to the scatter-gather list for a command.
2929 * This driver always uses 64-bit address elements for simplicity.
2948 sc = cm->cm_sc;
2954 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
2958 cm->cm_max_segs);
2962 * Set up DMA direction flags. Bi-directional requests are also handled
2966 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
2988 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
2995 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
2999 sflags, nsegs - i);
3002 if (ratecheck(&sc->lastfail, &mps_chainfail_interval))
3005 cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
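The ratecheck() call above is what keeps chain-allocation failure messages to one per minute; here is a hedged user-space model of that behavior (not the kernel implementation).

#include <stdbool.h>
#include <time.h>

/* Allow an event (e.g. a log message) at most once per interval. */
bool
rate_ok(time_t *last, time_t interval_sec)
{
	time_t now = time(NULL);

	if (now - *last >= interval_sec) {
		*last = now;
		return (true);
	}
	return (false);
}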
3012 cm->cm_state = MPS_CM_STATE_INQUEUE;
3018 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3035 * assumed that if you have a command in-hand, then you have enough credits
3043 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
3044 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3045 &cm->cm_uio, mps_data_cb2, cm, 0);
3046 } else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) {
3047 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3048 cm->cm_data, mps_data_cb, cm, 0);
3049 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3050 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3051 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
3053 /* Add a zero-length element as needed */
3054 if (cm->cm_sge != NULL)
3075 if (sc->mps_flags & MPS_FLAGS_DIAGRESET)
3078 cm->cm_complete = NULL;
3079 cm->cm_flags |= MPS_CM_FLAGS_POLLED;
3089 if (curthread->td_no_sleeping != 0)
3092 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) {
3093 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
3094 error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout*hz);
3104 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
3121 if (cm->cm_timeout_handler == NULL) {
3129 cm->cm_timeout_handler(sc, cm);
3130 if (sc->mps_flags & MPS_FLAGS_REALLOCATED) {
3153 if (sc->mps_flags & MPS_FLAGS_BUSY) {
3162 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3163 req->Function = MPI2_FUNCTION_CONFIG;
3164 req->Action = params->action;
3165 req->SGLFlags = 0;
3166 req->ChainOffset = 0;
3167 req->PageAddress = params->page_address;
3168 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3171 hdr = &params->hdr.Ext;
3172 req->ExtPageType = hdr->ExtPageType;
3173 req->ExtPageLength = hdr->ExtPageLength;
3174 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3175 req->Header.PageLength = 0; /* Must be set to zero */
3176 req->Header.PageNumber = hdr->PageNumber;
3177 req->Header.PageVersion = hdr->PageVersion;
3181 hdr = &params->hdr.Struct;
3182 req->Header.PageType = hdr->PageType;
3183 req->Header.PageNumber = hdr->PageNumber;
3184 req->Header.PageLength = hdr->PageLength;
3185 req->Header.PageVersion = hdr->PageVersion;
3188 cm->cm_data = params->buffer;
3189 cm->cm_length = params->length;
3190 if (cm->cm_data != NULL) {
3191 cm->cm_sge = &req->PageBufferSGE;
3192 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3193 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
3195 cm->cm_sge = NULL;
3196 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3198 cm->cm_complete_data = params;
3199 if (params->callback != NULL) {
3200 cm->cm_complete = mps_config_complete;
3230 params = cm->cm_complete_data;
3232 if (cm->cm_data != NULL) {
3233 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3235 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3242 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3243 params->status = MPI2_IOCSTATUS_BUSY;
3247 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
3249 params->status = MPI2_IOCSTATUS_BUSY;
3252 params->status = reply->IOCStatus;
3253 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3254 params->hdr.Ext.ExtPageType = reply->ExtPageType;
3255 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
3256 params->hdr.Ext.PageType = reply->Header.PageType;
3257 params->hdr.Ext.PageNumber = reply->Header.PageNumber;
3258 params->hdr.Ext.PageVersion = reply->Header.PageVersion;
3260 params->hdr.Struct.PageType = reply->Header.PageType;
3261 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
3262 params->hdr.Struct.PageLength = reply->Header.PageLength;
3263 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
3268 if (params->callback != NULL)
3269 params->callback(sc, params);