Lines Matching defs:cm — definitions and uses of the per-request command pointer (struct mpr_command *cm) in the FreeBSD mpr(4) driver; each hit is preceded by its source line number.
100 struct mpr_command *cm);
103 static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
106 static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
126 * Added this union to convert cm->cm_desc.Words cleanly with le64toh/htole32.
758 struct mpr_command *cm;
816 cm = &sc->commands[i];
817 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
1127 mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
1132 mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
1133 cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
1142 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
1143 ("command not busy, state = %u\n", cm->cm_state));
1144 cm->cm_state = MPR_CM_STATE_INQUEUE;
1147 rd.u.low = cm->cm_desc.Words.Low;
1151 rd.u.low = htole32(cm->cm_desc.Words.Low);
1152 rd.u.high = htole32(cm->cm_desc.Words.High);
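The mpr_enqueue_request hits above (1127-1152) show the command moving from BUSY to INQUEUE and the 64-bit request descriptor being emitted as two little-endian 32-bit words through the union described by the comment at line 126. A minimal sketch of that union pattern follows; the type, field, and helper names are illustrative stand-ins, not the driver's.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the driver's descriptor union (see the comment at 126). */
    union request_descriptor {
        uint64_t word;            /* whole 64-bit request descriptor */
        struct {
            uint32_t low;         /* MPI2 layout: flags byte, MSI-X index, SMID */
            uint32_t high;
        } u;
    };

    /* Minimal htole32() stand-in; real code includes <sys/endian.h>. */
    static uint32_t to_le32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (__builtin_bswap32(v));
    #else
        return (v);
    #endif
    }

    int main(void)
    {
        union request_descriptor rd;
        uint16_t smid = 42;       /* hypothetical SMID */

        /* Low word carries the SMID in its upper 16 bits; the high word is
         * unused for a default request descriptor. */
        rd.u.low = to_le32((uint32_t)smid << 16);
        rd.u.high = to_le32(0);
        printf("descriptor words: low=%#x high=%#x\n", rd.u.low, rd.u.high);
        return (0);
    }

Swapping each half with htole32() (1151-1152) keeps the layout explicit on big-endian hosts instead of byte-swapping one opaque 64-bit value.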
1484 struct mpr_command *cm;
1583 cm = &sc->commands[i];
1584 cm->cm_req = sc->req_frames + i * sc->reqframesz;
1585 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
1586 cm->cm_sense = &sc->sense_frames[i];
1587 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
1588 cm->cm_desc.Default.SMID = htole16(i);
1589 cm->cm_sc = sc;
1590 cm->cm_state = MPR_CM_STATE_BUSY;
1591 TAILQ_INIT(&cm->cm_chain_list);
1592 TAILQ_INIT(&cm->cm_prp_page_list);
1593 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);
1596 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
1599 mpr_free_high_priority_command(sc, cm);
1601 mpr_free_command(sc, cm);
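Lines 1583-1601 initialize each preallocated command: the request and sense pools are sliced per SMID, the chain and PRP page lists are set up, a per-command callout and DMA map are created, and the command starts out BUSY until it is released to the free list. A sketch of the pool-slicing arithmetic, with made-up sizes and a reduced command structure (the real driver uses busdma allocations and struct mpr_command):

    #include <stdint.h>
    #include <sys/queue.h>

    #define NCMDS       4
    #define REQ_FRAMESZ 128
    #define SENSE_LEN   96

    struct chain;                          /* chain frames, opaque here */
    struct cmd {
        void     *req;                     /* request frame (virtual) */
        uint64_t  req_busaddr;             /* request frame (bus address) */
        void     *sense;
        uint64_t  sense_busaddr;
        uint16_t  smid;                    /* 1:1 with the slot index */
        TAILQ_HEAD(, chain) chain_list;    /* extra SGE chain frames */
    };

    int main(void)
    {
        static uint8_t req_pool[NCMDS * REQ_FRAMESZ];
        static uint8_t sense_pool[NCMDS * SENSE_LEN];
        uint64_t req_busaddr = 0x10000000;     /* pretend bus addresses */
        uint64_t sense_busaddr = 0x20000000;
        struct cmd cmds[NCMDS];

        for (int i = 0; i < NCMDS; i++) {
            struct cmd *cm = &cmds[i];

            cm->req           = req_pool + i * REQ_FRAMESZ;
            cm->req_busaddr   = req_busaddr + i * REQ_FRAMESZ;
            cm->sense         = sense_pool + i * SENSE_LEN;
            cm->sense_busaddr = sense_busaddr + i * SENSE_LEN;
            cm->smid          = (uint16_t)i;   /* driver stores this htole16() (1588) */
            TAILQ_INIT(&cm->chain_list);       /* as at 1591-1592 */
        }
        return (0);
    }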
2094 struct mpr_command *cm;
2122 cm = &sc->commands[i];
2123 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state))
2126 hdr.state = cm->cm_state;
2128 hdr.deschi = cm->cm_desc.Words.High;
2129 hdr.desclo = cm->cm_desc.Words.Low;
2130 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2134 sbuf_bcat(sb, cm->cm_req, 128);
2135 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
2379 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
2383 if (cm == NULL) {
2388 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE,
2389 ("command not inqueue, state = %u\n", cm->cm_state));
2390 cm->cm_state = MPR_CM_STATE_BUSY;
2391 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
2392 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
2394 if (cm->cm_complete != NULL) {
2396 "%s cm %p calling cm_complete %p data %p reply %p\n",
2397 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2398 cm->cm_reply);
2399 cm->cm_complete(sc, cm);
2402 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
2403 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
2404 wakeup(cm);
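mpr_complete_command (2379-2404) is the single funnel for finished commands: it flips the state from INQUEUE back to BUSY, marks polled commands COMPLETE, then calls the registered cm_complete handler and/or wakes a sleeper parked in mpr_wait_command. A sketch of that dispatch, with illustrative flag values and wakeup() replaced by a stub:

    #include <stdio.h>

    enum cm_state { CM_STATE_FREE, CM_STATE_BUSY, CM_STATE_INQUEUE };

    #define CM_FLAGS_POLLED   0x1      /* illustrative bit values */
    #define CM_FLAGS_WAKEUP   0x2
    #define CM_FLAGS_COMPLETE 0x4

    struct softc;
    struct cmd {
        enum cm_state state;
        unsigned      flags;
        void        (*complete)(struct softc *, struct cmd *);
    };

    static void wakeup_stub(void *chan) { (void)chan; /* wakeup(9) in the kernel */ }

    void complete_command(struct softc *sc, struct cmd *cm)
    {
        if (cm == NULL)
            return;
        /* Only a command that is actually in the hardware queue may complete. */
        if (cm->state != CM_STATE_INQUEUE) {
            fprintf(stderr, "command not inqueue, state = %d\n", cm->state);
            return;
        }
        cm->state = CM_STATE_BUSY;

        if (cm->flags & CM_FLAGS_POLLED)
            cm->flags |= CM_FLAGS_COMPLETE;    /* poller spins on this bit */

        if (cm->complete != NULL)
            cm->complete(sc, cm);              /* asynchronous consumer */

        if (cm->flags & CM_FLAGS_WAKEUP)
            wakeup_stub(cm);                   /* synchronous waiter */
    }

The KASSERT at 2388-2389 enforces the same INQUEUE-only rule that the sketch checks with a plain if.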
2523 struct mpr_command *cm = NULL;
2535 cm = NULL;
2555 * and cm completion handlers which decide to do a diag
2568 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2569 cm->cm_reply = NULL;
2636 cm = &sc->commands[
2638 if (cm->cm_state == MPR_CM_STATE_INQUEUE) {
2639 cm->cm_reply = reply;
2640 cm->cm_reply_data =
2646 " ignoring state %d cm %p\n",
2647 cm->cm_state, cm);
2659 cm = NULL;
2663 if (cm != NULL) {
2665 if (cm->cm_reply)
2666 mpr_display_reply_info(sc, cm->cm_reply);
2667 mpr_complete_command(sc, cm);
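The reply path (2523-2667) maps a reply descriptor back to its command by indexing sc->commands with the little-endian SMID; SCSI IO success descriptors carry no reply frame, address replies attach one before mpr_complete_command runs, and descriptors for commands that are not INQUEUE are ignored. A sketch of that lookup with the state guard (simplified descriptor handling, not the MPI2 structures):

    #include <stdint.h>
    #include <stddef.h>

    enum cm_state { CM_STATE_FREE, CM_STATE_BUSY, CM_STATE_INQUEUE };

    struct cmd {
        enum cm_state state;
        void         *reply;       /* reply frame, if the IOC sent one */
    };

    struct softc {
        struct cmd *commands;
        unsigned    num_commands;
    };

    static uint16_t from_le16(uint16_t v) { return (v); /* le16toh() on a LE host */ }

    /* Returns the command to complete, or NULL if the descriptor is stale. */
    struct cmd *
    reply_to_command(struct softc *sc, uint16_t smid_le, void *reply_frame)
    {
        uint16_t smid = from_le16(smid_le);
        struct cmd *cm;

        if (smid == 0 || smid >= sc->num_commands)
            return (NULL);
        cm = &sc->commands[smid];
        /* Guard against duplicate or stale descriptors: only a command that
         * is still in the queue may be completed (compare 2638-2647). */
        if (cm->state != CM_STATE_INQUEUE)
            return (NULL);
        cm->reply = reply_frame;   /* NULL for SCSI IO success descriptors */
        return (cm);
    }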
2709 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
2713 if (cm->cm_reply)
2715 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2717 mpr_free_command(sc, cm);
2752 struct mpr_command *cm = NULL;
2767 if ((cm = mpr_alloc_command(sc)) == NULL)
2769 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2784 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2785 cm->cm_data = NULL;
2787 error = mpr_request_polled(sc, &cm);
2788 if (cm != NULL)
2789 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2799 if (cm != NULL)
2800 mpr_free_command(sc, cm);
2808 struct mpr_command *cm;
2823 if ((cm = mpr_alloc_command(sc)) == NULL)
2825 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2840 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2841 cm->cm_data = NULL;
2842 cm->cm_complete = mpr_reregister_events_complete;
2844 error = mpr_map_command(sc, cm);
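Lines 2752-2844 send the same MPI2_EVENT_NOTIFICATION_REQUEST in two ways: the initial registration polls with mpr_request_polled, while re-registration sets cm_complete = mpr_reregister_events_complete and goes through mpr_map_command asynchronously. A sketch of that shared-setup/two-completion shape; the request layout, function code, and submit helpers are stand-ins:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct softc;
    struct cmd {
        uint8_t  req[128];                 /* request frame */
        void    *data;
        void   (*complete)(struct softc *, struct cmd *);
    };

    struct event_request {                 /* toy stand-in for the MPI2 struct */
        uint8_t  function;
        uint32_t event_masks[4];
    };

    /* Stubs for the driver's submit paths (mpr_request_polled / mpr_map_command). */
    static int submit_polled(struct softc *sc, struct cmd **cmp) { (void)sc; (void)cmp; return (0); }
    static int submit_async(struct softc *sc, struct cmd *cm)    { (void)sc; (void)cm;  return (0); }

    static void setup_event_request(struct cmd *cm, const uint32_t masks[4])
    {
        struct event_request *req = (struct event_request *)cm->req;

        memset(req, 0, sizeof(*req));
        req->function = 0x07;              /* event notification, illustrative value */
        memcpy(req->event_masks, masks, sizeof(req->event_masks));
        cm->data = NULL;                   /* no data buffer, so no SGL is needed */
    }

    int register_events(struct softc *sc, struct cmd *cm, const uint32_t masks[4],
        int async, void (*done)(struct softc *, struct cmd *))
    {
        setup_event_request(cm, masks);
        if (async) {
            cm->complete = done;           /* e.g. a reregister-complete handler */
            return (submit_async(sc, cm));
        }
        cm->complete = NULL;
        return (submit_polled(sc, &cm));   /* blocks until reply or timeout */
    }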
2909 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
2955 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
3105 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm,
3131 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS))
3202 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
3227 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
3269 ptr_first_sgl = (uint32_t *)cm->cm_sge;
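mpr_check_pcie_native_sgl and mpr_build_nvme_prp (2909-3269) convert a scatter/gather list into an NVMe-native PRP list for NVMe devices behind the tri-mode HBA, subject to the target's MDTS limit, and park extra PRP pages on cm_prp_page_list. The sketch below shows only the core PRP rule, that every entry after the first must land on a page boundary; it ignores the PRP1/PRP2 list-chaining details and uses a hypothetical 4 KiB page size:

    #include <stdint.h>
    #include <stdio.h>

    #define PRP_PAGE_SIZE 4096ULL

    /*
     * Fill 'prp' with the entries covering [busaddr, busaddr + len).  The
     * first entry may start at any offset inside a page; every later entry
     * is the bus address of a page boundary.  Returns the number of entries
     * written, or -1 if 'max' is too small.
     */
    static int build_prp(uint64_t busaddr, uint64_t len, uint64_t *prp, int max)
    {
        int n = 0;

        while (len > 0) {
            uint64_t offset = busaddr & (PRP_PAGE_SIZE - 1);
            uint64_t chunk = PRP_PAGE_SIZE - offset;   /* rest of this page */

            if (chunk > len)
                chunk = len;
            if (n == max)
                return (-1);
            prp[n++] = busaddr;
            busaddr += chunk;       /* next entry starts on a page boundary */
            len -= chunk;
        }
        return (n);
    }

    int main(void)
    {
        uint64_t prp[8];
        int n = build_prp(0x100000800ULL, 10240, prp, 8);

        for (int i = 0; i < n; i++)
            printf("PRP[%d] = %#llx\n", i, (unsigned long long)prp[i]);
        return (0);
    }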
3379 mpr_add_chain(struct mpr_command *cm, int segsleft)
3381 struct mpr_softc *sc = cm->cm_sc;
3393 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
3400 if (cm->cm_sglsize < sgc_size)
3403 chain = mpr_alloc_chain(cm->cm_sc);
3411 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
3419 if (cm->cm_sglsize < (sgc_size * segsleft)) {
3430 current_segs = (cm->cm_sglsize / sgc_size) - 1;
3437 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
3446 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
3447 req = (MPI2_REQUEST_HEADER *)cm->cm_req;
3450 cm->cm_sglsize = sc->chain_frame_size;
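mpr_add_chain (3379-3450) fires when the inline SGL space in the request frame is nearly gone: it allocates a chain frame, rewrites the last inline slot as an IEEE chain element pointing at that frame, links the frame on cm_chain_list, and redirects the SGL cursor (cm_sge/cm_sglsize) into the new frame. A sketch of the cursor redirection with a simplified chain-element layout (not the MPI2.5 structures, and without the byte-swapping the real code does):

    #include <stdint.h>

    struct chain_element {             /* stand-in for MPI25_IEEE_SGE_CHAIN64 */
        uint64_t address;              /* bus address of the chain frame */
        uint32_t length;               /* bytes available in that frame */
        uint8_t  next_chain_offset;
        uint8_t  flags;
    };

    struct sgl_cursor {
        uint8_t *sge;                  /* next free SGE slot */
        int      sglsize;              /* bytes left at 'sge' */
    };

    struct chain_frame {
        uint8_t  *kva;                 /* frame virtual address */
        uint64_t  busaddr;             /* frame bus address */
        int       size;
    };

    /* Turn the current inline slot into a chain element and move the cursor. */
    int add_chain(struct sgl_cursor *cur, struct chain_frame *cf)
    {
        struct chain_element *ce;

        if (cur->sglsize < (int)sizeof(*ce))
            return (-1);               /* no room even for the chain element */

        ce = (struct chain_element *)cur->sge;
        ce->address = cf->busaddr;     /* hardware follows this pointer */
        ce->length = (uint32_t)cf->size;
        ce->next_chain_offset = 0;
        ce->flags = 0x80;              /* "chain element", illustrative value */

        /* All further SGEs are written into the chain frame itself. */
        cur->sge = cf->kva;
        cur->sglsize = cf->size;
        return (0);
    }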
3461 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
3472 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
3473 mpr_dprint(cm->cm_sc, MPR_ERROR,
3495 * DMA buffer (same cm command).
3500 if (cm->cm_out_len) {
3501 sge->FlagsLength = cm->cm_out_len |
3507 cm->cm_sglsize -= len;
3511 bcopy(sge, cm->cm_sge, len);
3512 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3521 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) {
3533 cm->cm_sglsize -= len;
3537 bcopy(sge, cm->cm_sge, len);
3538 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
3548 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft)
3565 if (cm->cm_sglsize < ieee_sge_size)
3568 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) {
3569 if ((error = mpr_add_chain(cm, segsleft)) != 0)
3582 * DMA buffer (same cm command).
3587 if (cm->cm_out_len) {
3588 sge->Length = cm->cm_out_len;
3591 cm->cm_sglsize -= ieee_sge_size;
3595 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3596 cm->cm_sge =
3597 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3608 cm->cm_sglsize -= ieee_sge_size;
3612 bcopy(sgep, cm->cm_sge, ieee_sge_size);
3613 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge +
3622 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags,
3628 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) {
3634 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft));
3646 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft));
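mpr_push_sge and mpr_push_ieee_sge (3461-3613) are bump-pointer writers: each element is bcopy'd to cm_sge, the cursor advances, cm_sglsize shrinks, and a chain is inserted when segments remain but only one slot is left; mpr_add_dmaseg (3622-3646) merely formats the segment as either a legacy MPI2 simple SGE or an IEEE 64-bit SGE before pushing it. A sketch of the bump-pointer core with an illustrative 16-byte element:

    #include <stdint.h>
    #include <string.h>

    struct simple_sge {                /* stand-in for MPI2/MPI25 simple SGEs */
        uint64_t address;
        uint32_t length;
        uint32_t flags;
    };

    struct sgl_cursor {
        uint8_t *sge;                  /* next free slot in the SGL */
        int      sglsize;              /* bytes remaining at 'sge' */
    };

    #define SGE_FLAG_LAST 0x1          /* marks the final element, illustrative */

    int push_sge(struct sgl_cursor *cur, uint64_t busaddr, uint32_t len,
        int segsleft)
    {
        struct simple_sge sge;

        if (cur->sglsize < (int)sizeof(sge))
            return (-1);               /* caller must add a chain first */

        sge.address = busaddr;         /* real code byte-swaps with htole64() */
        sge.length = len;
        sge.flags = (segsleft == 1) ? SGE_FLAG_LAST : 0;

        memcpy(cur->sge, &sge, sizeof(sge));   /* bcopy() in the driver */
        cur->sge += sizeof(sge);
        cur->sglsize -= (int)sizeof(sge);
        return (0);
    }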
3654 struct mpr_command *cm;
3657 cm = (struct mpr_command *)arg;
3658 sc = cm->cm_sc;
3664 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
3667 cm->cm_max_segs);
3675 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
3697 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
3704 if (cm->cm_targ && cm->cm_targ->is_nvme &&
3705 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
3711 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
3714 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
3721 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
3728 cm->cm_state = MPR_CM_STATE_INQUEUE;
3729 mpr_complete_command(sc, cm);
3735 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3736 mpr_enqueue_request(sc, cm);
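mpr_data_cb (3654-3736) is the bus_dmamap_load callback: it rejects segment counts above cm_max_segs, pushes each segment through mpr_add_dmaseg, syncs the DMA map for the transfer direction, and enqueues the request; on failure it sets MPR_CM_FLAGS_CHAIN_FAILED and completes the command immediately (after marking it INQUEUE so the completion-path assertion holds). A sketch of that callback shape with busdma(9) types replaced by stand-ins:

    #include <stdint.h>

    struct dma_seg { uint64_t addr; uint32_t len; };

    struct cmd {
        int      max_segs;             /* 0 means "no limit" */
        unsigned flags;
    #define CMD_CHAIN_FAILED 0x1
    };

    /* Stubs for the driver's helpers. */
    static int add_dmaseg(struct cmd *cm, uint64_t a, uint32_t l, int segsleft)
    { (void)cm; (void)a; (void)l; (void)segsleft; return (0); }
    static void enqueue_request(struct cmd *cm)  { (void)cm; }
    static void complete_command(struct cmd *cm) { (void)cm; }

    void data_cb(void *arg, struct dma_seg *segs, int nsegs, int error)
    {
        struct cmd *cm = arg;
        int i;

        if (error != 0)
            goto fail;
        if (cm->max_segs != 0 && nsegs > cm->max_segs)
            goto fail;                 /* more segments than the SGL can hold */

        for (i = 0; i < nsegs; i++) {
            if (add_dmaseg(cm, segs[i].addr, segs[i].len, nsegs - i) != 0)
                goto fail;
        }
        /* Real code syncs the DMA map for the transfer direction here. */
        enqueue_request(cm);
        return;

    fail:
        cm->flags |= CMD_CHAIN_FAILED;
        complete_command(cm);          /* hand the error back to the issuer */
    }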
3756 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
3760 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
3761 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3762 &cm->cm_uio, mpr_data_cb2, cm, 0);
3763 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
3764 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3765 cm->cm_data, mpr_data_cb, cm, 0);
3766 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3767 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3768 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
3771 if (cm->cm_sge != NULL)
3772 mpr_add_dmaseg(cm, 0, 0, 0, 1);
3773 mpr_enqueue_request(sc, cm);
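mpr_map_command (3756-3773) selects the bus_dmamap_load variant by how the data was supplied: a struct uio, a CAM CCB, or a flat pointer/length pair; with no data it just terminates the SGL (if any) and enqueues directly. A dispatch sketch with stub loaders standing in for the busdma(9) calls:

    #include <stddef.h>

    struct cmd {
        unsigned flags;
    #define CMD_USE_UIO 0x1
    #define CMD_USE_CCB 0x2
        void    *data;                 /* uio, ccb, or flat buffer */
        size_t   length;               /* only meaningful for flat buffers */
        void    *sge;                  /* SGL cursor; NULL if the request has no SGL */
    };

    /* Stubs for the three load paths and the no-data helpers. */
    static int load_uio(struct cmd *cm)        { (void)cm; return (0); }
    static int load_ccb(struct cmd *cm)        { (void)cm; return (0); }
    static int load_buf(struct cmd *cm)        { (void)cm; return (0); }
    static void terminate_sgl(struct cmd *cm)  { (void)cm; }
    static void enqueue_request(struct cmd *cm){ (void)cm; }

    int map_command(struct cmd *cm)
    {
        if (cm->flags & CMD_USE_UIO)
            return (load_uio(cm));     /* scatter/gather from a struct uio */
        if (cm->flags & CMD_USE_CCB)
            return (load_ccb(cm));     /* data described by a CAM CCB */
        if (cm->data != NULL && cm->length != 0)
            return (load_buf(cm));     /* plain kernel virtual buffer */

        /* No data: close out the (empty) SGL and queue the request now. */
        if (cm->sge != NULL)
            terminate_sgl(cm);
        enqueue_request(cm);
        return (0);
    }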
3790 struct mpr_command *cm = *cmp;
3795 cm->cm_complete = NULL;
3796 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
3797 error = mpr_map_command(sc, cm);
3808 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
3818 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3835 if (cm->cm_timeout_handler == NULL) {
3843 cm->cm_timeout_handler(sc, cm);
3865 struct mpr_command *cm = *cmp;
3869 cm->cm_flags |= MPR_CM_FLAGS_POLLED;
3870 cm->cm_complete = NULL;
3871 mpr_map_command(sc, cm);
3874 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3894 cm->cm_state = MPR_CM_STATE_BUSY;
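mpr_wait_command (3790-3843) sleeps on the command with msleep() and, if the sleep times out without MPR_CM_FLAGS_COMPLETE being set, runs cm_timeout_handler; mpr_request_polled (3865-3894) instead busy-waits on the COMPLETE flag for contexts where sleeping is not possible, resetting the command state to BUSY on its failure path (3894). A sketch of the two wait styles with sleep/poll stand-ins and a bounded retry loop added for safety:

    #define CMD_POLLED   0x1           /* illustrative flag values */
    #define CMD_WAKEUP   0x2
    #define CMD_COMPLETE 0x4

    struct cmd { unsigned flags; };

    /* Stubs: msleep(9), the interrupt handler, and the per-command timeout hook. */
    static int  sleep_on(void *chan, int ticks)      { (void)chan; (void)ticks; return (0); }
    static void poll_interrupts(void)                { /* drain the reply queue by hand */ }
    static void run_timeout_handler(struct cmd *cm)  { (void)cm; }

    /* Sleeping wait: the completion path does wakeup(cm) when CMD_WAKEUP is set. */
    int wait_command(struct cmd *cm, int timeout_ticks)
    {
        cm->flags |= CMD_WAKEUP | CMD_POLLED;
        if (sleep_on(cm, timeout_ticks) != 0 &&
            (cm->flags & CMD_COMPLETE) == 0) {
            run_timeout_handler(cm);   /* typically escalates to a reset */
            return (-1);
        }
        return (0);
    }

    /* Busy-wait: usable before interrupts are running; the poller calls the
     * interrupt handler directly until the COMPLETE bit appears. */
    int request_polled(struct cmd *cm)
    {
        int retries = 1000;            /* the real code checks a deadline instead */

        cm->flags |= CMD_POLLED;
        while ((cm->flags & CMD_COMPLETE) == 0 && retries-- > 0)
            poll_interrupts();
        return ((cm->flags & CMD_COMPLETE) ? 0 : -1);
    }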
3920 struct mpr_command *cm;
3927 cm = mpr_alloc_command(sc);
3928 if (cm == NULL) {
3932 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3958 cm->cm_data = params->buffer;
3959 cm->cm_length = params->length;
3960 if (cm->cm_data != NULL) {
3961 cm->cm_sge = &req->PageBufferSGE;
3962 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3963 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
3965 cm->cm_sge = NULL;
3966 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3968 cm->cm_complete_data = params;
3970 cm->cm_complete = mpr_config_complete;
3971 return (mpr_map_command(sc, cm));
3973 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP);
3977 if (cm != NULL)
3978 mpr_free_command(sc, cm);
3981 mpr_config_complete(sc, cm);
3994 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
4000 params = cm->cm_complete_data;
4002 if (cm->cm_data != NULL) {
4003 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
4005 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
4012 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
4017 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
4037 mpr_free_command(sc, cm);
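The config-page path (3920-4037) builds an MPI2_CONFIG_REQUEST in cm_req, points the request SGL at the caller's buffer with MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN, and finishes either synchronously through mpr_wait_command or asynchronously through mpr_config_complete, which syncs and unloads the DMA map, checks the command error flags, and reports the IOC status from the reply. A sketch of that request/completion pairing with stand-in types and stub DMA helpers:

    #include <stdint.h>
    #include <stddef.h>

    struct config_params {             /* what the caller passes in */
        void    *buffer;
        size_t   length;
        void   (*callback)(struct config_params *);
        uint16_t ioc_status;           /* filled in at completion */
    };

    struct cmd {
        unsigned flags;
    #define CMD_SGE_SIMPLE 0x1
    #define CMD_DATAIN     0x2
    #define CMD_ERROR      0x4
        void    *data;
        size_t   length;
        void    *reply;                /* stand-in for the MPI2 config reply */
        struct config_params *complete_data;
    };

    static int submit(struct cmd *cm)              { (void)cm; return (0); }  /* stub */
    static void dma_sync_and_unload(struct cmd *cm){ (void)cm; }              /* stub */

    int read_config_page(struct cmd *cm, struct config_params *params)
    {
        /* Real code fills an MPI2_CONFIG_REQUEST in cm->req here. */
        cm->data = params->buffer;
        cm->length = params->length;
        if (cm->data != NULL)
            cm->flags = CMD_SGE_SIMPLE | CMD_DATAIN;   /* page read into the buffer */
        cm->complete_data = params;
        return (submit(cm));           /* async path sets a completion handler */
    }

    void config_complete(struct cmd *cm)
    {
        struct config_params *params = cm->complete_data;

        if (cm->data != NULL)
            dma_sync_and_unload(cm);   /* buffer is now safe to read */
        if (cm->flags & CMD_ERROR)
            params->ioc_status = 1;    /* nonzero on driver-level error, illustrative */
        else if (cm->reply != NULL)
            params->ioc_status = 0;    /* real code copies the reply's IOCStatus */
        if (params->callback != NULL)
            params->callback(params);
    }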