1 /*-
113 io->tgt_io.state = OCS_CAM_IO_FREE;
114 io->tgt_io.flags = 0;
115 io->tgt_io.app = NULL;
117 if(io->ocs->io_in_use != 0)
118 atomic_subtract_acq_32(&io->ocs->io_in_use, 1);
122 ocs_attach_port(ocs_t *ocs, int chan)
128 ocs_fcport *fcp = FCPORT(ocs, chan);
131 device_get_name(ocs->dev), ocs,
132 device_get_unit(ocs->dev), &ocs->sim_lock,
133 max_io, max_io, ocs->devq))) {
134 device_printf(ocs->dev, "Can't allocate SIM\n");
138 mtx_lock(&ocs->sim_lock);
139 if (CAM_SUCCESS != xpt_bus_register(sim, ocs->dev, chan)) {
140 device_printf(ocs->dev, "Can't register bus %d\n", chan);
141 mtx_unlock(&ocs->sim_lock);
145 mtx_unlock(&ocs->sim_lock);
149 device_printf(ocs->dev, "Can't create path\n");
151 mtx_unlock(&ocs->sim_lock);
156 fcp->ocs = ocs;
157 fcp->sim = sim;
158 fcp->path = path;
160 callout_init_mtx(&fcp->ldt, &ocs->sim_lock, 0);
161 TASK_INIT(&fcp->ltask, 1, ocs_ldt_task, fcp);
167 ocs_detach_port(ocs_t *ocs, int32_t chan)
172 fcp = FCPORT(ocs, chan);
174 sim = fcp->sim;
175 path = fcp->path;
177 callout_drain(&fcp->ldt);
180 if (fcp->sim) {
181 mtx_lock(&ocs->sim_lock);
182 ocs_tgt_resource_abort(ocs, &fcp->targ_rsrc_wildcard);
186 fcp->path = NULL;
191 fcp->sim = NULL;
192 mtx_unlock(&ocs->sim_lock);
206 device_printf(ocs->dev, "Can't allocate SIMQ\n");
207 return -1;
210 ocs->devq = devq;
212 if (mtx_initialized(&ocs->sim_lock) == 0) {
213 mtx_init(&ocs->sim_lock, "ocs_sim_lock", NULL, MTX_DEF);
216 for (i = 0; i < (ocs->num_vports + 1); i++) {
218 ocs_log_err(ocs, "Attach port failed for chan: %d\n", i);
223 ocs->io_high_watermark = max_io;
224 ocs->io_in_use = 0;
228 while (--i >= 0) {
232 cam_simq_free(ocs->devq);
234 if (mtx_initialized(&ocs->sim_lock))
235 mtx_destroy(&ocs->sim_lock);
245 for (i = (ocs->num_vports); i >= 0; i--) {
249 cam_simq_free(ocs->devq);
251 if (mtx_initialized(&ocs->sim_lock))
252 mtx_destroy(&ocs->sim_lock);
269 * @return 0 on success, non-zero otherwise
274 ocs->enable_task_set_full = ocs_scsi_get_property(ocs,
277 ocs->enable_task_set_full ? "enabled" : "disabled");
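/*
 * (The flag cached above is consulted in ocs_scsi_recv_cmd(): when it is set
 * and io_in_use reaches io_high_watermark, new commands are answered with a
 * TASK SET FULL or BUSY status instead of being queued to CAM.)
 */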
303 * Called by the base driver when a new domain is discovered. A target-server
308 * which is declared by the target-server code and is used for target-server
311 * This function will only be called if the base-driver has been enabled for
314 * Note that this call is made to target-server backends,
315 * the ocs_scsi_ini_new_domain() function is called to initiator-client backends.
331 * Called by base-driver when a domain goes away. A target-server will
334 * Note that this call is made to target-server backends,
335 * the ocs_scsi_ini_del_domain() function is called to initiator-client backends.
350 * Called by the base driver when a new sport is discovered. A target-server
355 * which is declared by the target-server code and is used for
356 * target-server private data.
358 * This function will only be called if the base-driver has been enabled for
361 * Note that this call is made to target-server backends,
362 * the ocs_scsi_ini_new_sport() function is called to initiator-client backends.
371 ocs_t *ocs = sport->ocs;
373 if(!sport->is_vport) {
374 sport->tgt_data = FCPORT(ocs, 0);
384 * Called by base-driver when a sport goes away. A target-server will
387 * Note that this call is made to target-server backends,
388 * the ocs_scsi_ini_del_sport() is called to initiator-client backends.
404 * Sent by base driver to notify a target-server of the presence of a new
405 * remote initiator. The target-server may use this call to prepare for
409 * tgt_node that is declared and used by a target-server for private
423 ocs_t *ocs = node->ocs;
425 struct ac_device_changed *adc;
429 fcp = node->sport->tgt_data;
439 atomic_subtract_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);
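/*
 * Announce the newly arrived remote initiator to CAM: describe it in an
 * ac_device_changed record and post it as an AC_CONTRACT async event on
 * this port's path.
 */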
442 adc = (struct ac_device_changed *) ac.contract_data;
443 adc->wwpn = ocs_node_get_wwpn(node);
444 adc->port = node->rnode.fc_id;
445 adc->target = node->instance_index;
446 adc->arrived = 1;
447 xpt_async(AC_CONTRACT, fcp->path, &ac);
456 * Sent by base driver to validate a remote initiator. The target-server
478 * Sent by base driver to notify a target-server that a remote initiator
480 * and the target-server will receive appropriate completions.
495 ocs_t *ocs = node->ocs;
498 struct ac_device_changed *adc;
501 fcp = node->sport->tgt_data;
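/*
 * Mirror image of the arrival path: post an AC_CONTRACT departure event
 * (arrived = 0) for this initiator, then return its reserved I/O slots to
 * the high watermark.
 */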
508 adc = (struct ac_device_changed *) ac.contract_data;
509 adc->wwpn = ocs_node_get_wwpn(node);
510 adc->port = node->rnode.fc_id;
511 adc->target = node->instance_index;
512 adc->arrived = 0;
513 xpt_async(AC_CONTRACT, fcp->path, &ac);
523 atomic_add_acq_32(&ocs->io_high_watermark, OCS_RSVD_INI_IO);
533 * target-server will process the command, and issue data and/or response phase
538 * a target-server for private information.
551 ocs_t *ocs = io->ocs;
553 ocs_node_t *node = io->node;
555 int32_t rc = -1;
558 fcp = node->sport->tgt_data;
564 atomic_add_acq_32(&ocs->io_in_use, 1);
567 io->timeout = ocs->target_io_timer_sec;
569 if (ocs->enable_task_set_full &&
570 (ocs->io_in_use >= ocs->io_high_watermark)) {
573 atomic_store_rel_32(&io->node->tgt_node.busy_sent, FALSE);
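/*
 * Route the command to the per-LUN target resource, falling back to the
 * wildcard resource when that LUN has not been individually enabled.
 */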
576 if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
577 trsrc = &fcp->targ_rsrc[lun];
578 } else if (fcp->targ_rsrc_wildcard.enabled) {
579 trsrc = &fcp->targ_rsrc_wildcard;
583 atio = (struct ccb_accept_tio *)STAILQ_FIRST(&trsrc->atio);
587 STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);
589 atio->ccb_h.status = CAM_CDB_RECVD;
590 atio->ccb_h.target_lun = lun;
591 atio->sense_len = 0;
593 atio->init_id = node->instance_index;
594 atio->tag_id = io->tag;
595 atio->ccb_h.ccb_io_ptr = io;
598 atio->tag_action = MSG_SIMPLE_Q_TAG;
600 atio->tag_action = MSG_HEAD_OF_Q_TAG;
602 atio->tag_action = MSG_ORDERED_Q_TAG;
604 atio->tag_action = MSG_ACA_TASK;
606 atio->tag_action = CAM_TAG_ACTION_NONE;
607 atio->priority = (flags & OCS_SCSI_PRIORITY_MASK) >>
610 atio->cdb_len = cdb_len;
611 ocs_memcpy(atio->cdb_io.cdb_bytes, cdb, cdb_len);
613 io->tgt_io.flags = 0;
614 io->tgt_io.state = OCS_CAM_IO_COMMAND;
615 io->tgt_io.lun = lun;
622 ocs->dev, "%s: no ATIO for LUN %lx (en=%s) OX_ID %#x\n",
624 trsrc ? (trsrc->enabled ? "T" : "F") : "X",
625 be16toh(io->init_task_tag));
627 io->tgt_io.state = OCS_CAM_IO_MAX;
655 return -1;
663 * target-server will process the command, aborting commands as needed, and post
667 * tgt_io that is declared and used by a target-server for private information.
669 * If the target-server walks the node's active_ios linked list, and starts IO
684 ocs_t *ocs = tmfio->ocs;
685 ocs_node_t *node = tmfio->node;
688 int32_t rc = -1;
691 fcp = node->sport->tgt_data;
697 if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
698 trsrc = &fcp->targ_rsrc[lun];
699 } else if (fcp->targ_rsrc_wildcard.enabled) {
700 trsrc = &fcp->targ_rsrc_wildcard;
703 device_printf(tmfio->ocs->dev, "%s: io=%u(index) cmd=%#x LU=%lx en=%s\n",
704 __func__, tmfio->instance_index, cmd, (unsigned long)lun,
705 trsrc ? (trsrc->enabled ? "T" : "F") : "X");
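/*
 * Task management requests are handed to CAM as immediate-notify CCBs, so
 * consume one of the INOTs the target server queued on this resource.
 */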
707 inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot);
712 ocs->dev, "%s: no INOT for LUN %llx (en=%s) OX_ID %#x\n",
713 __func__, (unsigned long long)lun, trsrc ? (trsrc->enabled ? "T" : "F") : "X",
714 be16toh(tmfio->init_task_tag));
723 tmfio->tgt_io.app = abortio;
725 STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe);
727 inot->tag_id = tmfio->tag;
728 inot->seq_id = tmfio->tag;
730 if ((lun < OCS_MAX_LUN) && fcp->targ_rsrc[lun].enabled) {
731 inot->initiator_id = node->instance_index;
733 inot->initiator_id = CAM_TARGET_WILDCARD;
736 inot->ccb_h.status = CAM_MESSAGE_RECV;
737 inot->ccb_h.target_lun = lun;
741 inot->arg = MSG_ABORT_TASK;
742 inot->seq_id = abortio->tag;
743 device_printf(ocs->dev, "%s: ABTS IO.%#x st=%#x\n",
744 __func__, abortio->tag, abortio->tgt_io.state);
745 abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_RECV;
746 abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_NOTIFY;
749 device_printf(ocs->dev,
752 STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
757 inot->arg = MSG_ABORT_TASK_SET;
760 inot->arg = MSG_CLEAR_TASK_SET;
763 inot->arg = MSG_QUERY_ASYNC_EVENT;
766 inot->arg = MSG_LOGICAL_UNIT_RESET;
769 inot->arg = MSG_CLEAR_ACA;
772 inot->arg = MSG_TARGET_RESET;
775 device_printf(ocs->dev, "%s: unsupported TMF %#x\n",
777 STAILQ_INSERT_TAIL(&trsrc->inot, &inot->ccb_h, sim_links.stqe);
783 xpt_print(inot->ccb_h.path, "%s: func=%#x stat=%#x id=%#x lun=%#x"
785 __func__, inot->ccb_h.func_code, inot->ccb_h.status,
786 inot->ccb_h.target_id,
787 (unsigned int)inot->ccb_h.target_lun, inot->ccb_h.flags,
788 inot->tag_id, inot->seq_id, inot->initiator_id,
789 inot->arg);
793 abortio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_DEV;
840 * Called by the base driver when a new domain is discovered. An initiator-client
845 * which is declared by the initiator-client code and is used for
846 * initiator-client private data.
848 * This function will only be called if the base-driver has been enabled for
851 * Note that this call is made to initiator-client backends,
852 * the ocs_scsi_tgt_new_domain() function is called to target-server backends.
868 * Called by base-driver when a domain goes away. An initiator-client will
871 * This function will only be called if the base-driver has been enabled for
874 * Note that this call is made to initiator-client backends,
875 * the ocs_scsi_tgt_del_domain() function is called to target-server backends.
891 * An initiator-client will use this call to prepare for new remote node
894 * This function will only be called if the base-driver has been enabled for
897 * Note that this call is made to initiator-client backends,
898 * the ocs_scsi_tgt_new_sport() function is called to target-server backends.
907 ocs_t *ocs = sport->ocs;
910 if (!sport->is_vport) {
911 sport->tgt_data = fcp;
912 fcp->fc_id = sport->fc_id;
922 * Called by the base-driver when a sport goes away. An initiator-client will
925 * Note that this call is made to initiator-client backends,
926 * the ocs_scsi_tgt_del_sport() function is called to target-server backends.
935 ocs_t *ocs = sport->ocs;
938 if (!sport->is_vport) {
939 fcp->fc_id = 0;
946 ocs_t *ocs = sport->ocs;
951 if (!sport->is_vport) {
955 fcp = sport->tgt_data;
957 ocs_xport_status(ocs->xport, OCS_XPORT_PORT_STATUS, &value);
964 if ((fcp->role != KNOB_ROLE_NONE)) {
965 if(fcp->vport->sport != NULL) {
970 ocs_sport_vport_alloc(ocs->domain, fcp->vport);
983 tgt = &fcp->tgt[i];
985 if (tgt->state == OCS_TGT_STATE_NONE)
988 if (ocs_node_get_wwpn(node) == tgt->wwpn) {
993 return -1;
1000 * Sent by base driver to notify an initiator-client of the presence of a new
1001 * remote target. The initiator-client may use this call to prepare for
1019 tgt = &fcp->tgt[tgt_id];
1021 tgt->node_id = node->instance_index;
1022 tgt->state = OCS_TGT_STATE_VALID;
1024 tgt->port_id = node->rnode.fc_id;
1025 tgt->wwpn = ocs_node_get_wwpn(node);
1026 tgt->wwnn = ocs_node_get_wwnn(node);
1035 struct ocs_softc *ocs = node->ocs;
1038 if (fcp->tgt[i].state == OCS_TGT_STATE_NONE)
1043 device_printf(ocs->dev, "%s: ccb allocation failed\n", __func__);
1044 return -1;
1047 if (CAM_REQ_CMP != xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1048 cam_sim_path(fcp->sim),
1051 ocs->dev, "%s: target path creation failed\n", __func__);
1053 return -1;
1067 fcp = node->sport->tgt_data;
1089 if (!fcp->sim) {
1090 device_printf(ocs->dev, "%s: calling with NULL sim\n", __func__);
1094 if (CAM_REQ_CMP == xpt_create_path(&cpath, NULL, cam_sim_path(fcp->sim),
1103 * Device Lost Timer Function - when we have decided that a device was lost,
1117 taskqueue_enqueue(taskqueue_thread, &fcp->ltask);
1124 ocs_t *ocs = fcp->ocs;
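/*
 * Walk every target slot: entries marked LOST count down gone_timer on each
 * one-second pass; once it expires the slot is released back to
 * OCS_TGT_STATE_NONE.  The callout is rearmed while any entry is pending.
 */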
1129 tgt = &fcp->tgt[i];
1131 if (tgt->state != OCS_TGT_STATE_LOST) {
1135 if ((tgt->gone_timer != 0) && (ocs->attached)){
1136 tgt->gone_timer -= 1;
1143 tgt->state = OCS_TGT_STATE_NONE;
1147 callout_reset(&fcp->ldt, hz, ocs_ldt, fcp);
1149 callout_deactivate(&fcp->ldt);
1158 * Sent by base driver to notify an initiator-client that a remote target
1160 * and the initiator-client will receive appropriate completions.
1163 * ini_node that is declared and used by an initiator-client for private
1180 struct ocs_softc *ocs = node->ocs;
1187 return -1;
1190 fcp = node->sport->tgt_data;
1193 return -1;
1197 if (tgt_id == -1) {
1199 return -1;
1202 tgt = &fcp->tgt[tgt_id];
1205 if(!ocs->attached) {
1208 tgt->state = OCS_TGT_STATE_LOST;
1209 tgt->gone_timer = 30;
1210 if (!callout_active(&fcp->ldt)) {
1211 callout_reset(&fcp->ldt, hz, ocs_ldt, fcp);
1288 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1289 ccb->ccb_h.status |= status;
1317 ocs_t *ocs = io->ocs;
1326 if (atomic_cmpset_acq_32(&io->node->tgt_node.busy_sent, FALSE, TRUE)) {
1329 io->node->display_name, io->tag,
1330 io->ocs->io_in_use, io->ocs->io_high_watermark);
1333 printf("%s: full tag=%x iiu=%d\n", __func__, io->tag,
1334 io->ocs->io_in_use);
1343 ocs->io_high_watermark);
1372 struct ccb_scsiio *csio = &ccb->csio;
1373 struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr;
1374 uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
1376 (ccb->ccb_h.flags & CAM_SEND_STATUS) == CAM_SEND_STATUS;
1378 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1389 bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op);
1390 if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) {
1391 bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap);
1395 if (io->tgt_io.sendresp) {
1396 io->tgt_io.sendresp = 0;
1398 io->tgt_io.state = OCS_CAM_IO_RESP;
1400 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1401 resp.sense_data = (uint8_t *)&csio->sense_data;
1402 resp.sense_data_length = csio->sense_len;
1404 resp.residual = io->exp_xfer_len - io->transferred;
1421 if ((io->tgt_io.flags & OCS_CAM_IO_F_ABORT_NOTIFY) == 0) {
1425 io->tgt_io.state = OCS_CAM_IO_DATA_DONE;
1426 /*device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n",
1427 __func__, io->tgt_io.state, io->tag);*/
1448 ocs = io->ocs;
1450 io->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_DEV;
1460 device_printf(ocs->dev, "%s: unhandled status %d\n",
1463 rc = -1;
1489 struct ccb_scsiio *csio = &ccb->csio;
1490 struct ocs_softc *ocs = csio->ccb_h.ccb_ocs_ptr;
1491 uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
1503 bus_dmamap_sync(ocs->buf_dmat, io->tgt_io.dmap, op);
1504 if (io->tgt_io.flags & OCS_CAM_IO_F_DMAPPED) {
1505 bus_dmamap_unload(ocs->buf_dmat, io->tgt_io.dmap);
1510 csio->scsi_status = rsp->scsi_status;
1511 if (SCSI_STATUS_OK != rsp->scsi_status)
1516 csio->resid = rsp->residual;
1522 if ((rsp->residual < 0) && (ccb_status == CAM_REQ_CMP)) {
1526 if ((rsp->sense_data_length) &&
1527 !(ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR))) {
1530 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1531 if (rsp->sense_data_length < csio->sense_len) {
1532 csio->sense_resid =
1533 csio->sense_len - rsp->sense_data_length;
1534 sense_len = rsp->sense_data_length;
1536 csio->sense_resid = 0;
1537 sense_len = csio->sense_len;
1539 ocs_memcpy(&csio->sense_data, rsp->sense_data, sense_len);
1549 xpt_path_sbuf(ccb->ccb_h.path, &sb);
1558 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1559 scsi_command_string(&ccb->csio, &sb);
1560 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
1604 csio->ccb_h.ccb_io_ptr = NULL;
1605 csio->ccb_h.ccb_ocs_ptr = NULL;
1607 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1610 ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0)) {
1611 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1612 xpt_freeze_devq(ccb->ccb_h.path, 1);
1621 * @brief Load scatter-gather list entries into an IO
1641 sglarg->rc = -1;
1646 if ((sglarg->sgl_count + nseg) > sglarg->sgl_max) {
1648 sglarg->sgl_count, nseg, sglarg->sgl_max);
1649 sglarg->rc = -2;
1653 for (i = 0, c = sglarg->sgl_count; i < nseg; i++, c++) {
1654 sglarg->sgl[c].addr = seg[i].ds_addr;
1655 sglarg->sgl[c].len = seg[i].ds_len;
1658 sglarg->sgl_count = c;
1660 sglarg->rc = 0;
1665 * @brief Build a scatter-gather list from a CAM CCB
1673 * @return 0 on success, non-zero otherwise
1685 return -1;
1688 io->tgt_io.flags &= ~OCS_CAM_IO_F_DMAPPED;
1695 err = bus_dmamap_load_ccb(ocs->buf_dmat, io->tgt_io.dmap, ccb,
1700 ocs->dev, "%s: bus_dmamap_load_ccb error (%d %d)\n",
1702 return -1;
1705 io->tgt_io.flags |= OCS_CAM_IO_F_DMAPPED;
1716 * @return 0 on success, non-zero otherwise
1721 struct ccb_scsiio *csio = &ccb->csio;
1723 uint32_t cam_dir = ccb->ccb_h.flags & CAM_DIR_MASK;
1724 bool sendstatus = ccb->ccb_h.flags & CAM_SEND_STATUS;
1725 uint32_t xferlen = csio->dxfer_len;
1728 io = ocs_scsi_find_io(ocs, csio->tag_id);
1736 if (io->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) {
1737 /*device_printf(ocs->dev,
1739 __func__, io->tgt_io.state, io->tag, io->init_task_tag,
1740 io->tgt_io.flags);*/
1741 io->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM;
1743 if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1754 io->tgt_io.app = ccb;
1757 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1759 csio->ccb_h.ccb_ocs_ptr = ocs;
1760 csio->ccb_h.ccb_io_ptr = io;
1765 ocs_assert(ccb->ccb_h.flags & CAM_SEND_STATUS, -1);
1767 io->tgt_io.state = OCS_CAM_IO_RESP;
1769 resp.scsi_status = csio->scsi_status;
1771 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1772 resp.sense_data = (uint8_t *)&csio->sense_data;
1773 resp.sense_data_length = csio->sense_len;
1776 resp.residual = io->exp_xfer_len - io->transferred;
1783 io->tgt_io.state = OCS_CAM_IO_DATA;
1786 io->tgt_io.sendresp = 1;
1788 sgl = io->sgl;
1790 sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, io->sgl_allocated);
1794 sgl_count, csio->dxfer_len,
1798 sgl_count, csio->dxfer_len,
1801 device_printf(ocs->dev, "%s:"
1808 device_printf(ocs->dev, "%s: building SGL failed\n",
1814 device_printf(ocs->dev, "%s: Wrong value xfer and sendstatus"
1822 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1823 io->tgt_io.state = OCS_CAM_IO_DATA_DONE;
1824 device_printf(ocs->dev, "%s: CTIO state=%d tag=%#x\n",
1825 __func__, io->tgt_io.state, io->tag);
1839 /*device_printf(io->ocs->dev, "%s: tag=%x io=%p s=%#x\n",
1840 __func__, io->tag, io, scsi_status);*/
1853 * @return 0 on success, non-zero otherwise
1859 struct ccb_scsiio *csio = &ccb->csio;
1860 struct ccb_hdr *ccb_h = &csio->ccb_h;
1867 fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path)));
1869 if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_LOST) {
1870 device_printf(ocs->dev, "%s: device LOST %d\n", __func__,
1871 ccb_h->target_id);
1875 if (fcp->tgt[ccb_h->target_id].state == OCS_TGT_STATE_NONE) {
1876 device_printf(ocs->dev, "%s: device not ready %d\n", __func__,
1877 ccb_h->target_id);
1881 node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id);
1883 device_printf(ocs->dev, "%s: no device %d\n", __func__,
1884 ccb_h->target_id);
1888 if (!node->targ) {
1889 device_printf(ocs->dev, "%s: not target device %d\n", __func__,
1890 ccb_h->target_id);
1896 device_printf(ocs->dev, "%s: unable to alloc IO\n", __func__);
1897 return -1;
1902 io->tgt_io.app = ccb;
1904 csio->ccb_h.ccb_ocs_ptr = ocs;
1905 csio->ccb_h.ccb_io_ptr = io;
1906 sgl = io->sgl;
1908 sgl_count = ocs_build_scsi_sgl(ocs, ccb, io, sgl, io->sgl_allocated);
1911 device_printf(ocs->dev, "%s: building SGL failed\n", __func__);
1912 return -1;
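/*
 * Translate the CAM timeout (milliseconds, plus the CAM_TIME_* specials)
 * into the driver's whole-second I/O timer: CAM_TIME_INFINITY maps to 0
 * (no timer) and sub-second values round up to one second.
 */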
1915 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) {
1916 io->timeout = 0;
1917 } else if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
1918 io->timeout = OCS_CAM_IO_TIMEOUT;
1920 if (ccb->ccb_h.timeout < 1000)
1921 io->timeout = 1;
1923 io->timeout = ccb->ccb_h.timeout / 1000;
1927 switch (csio->tag_action) {
1943 flags |= (csio->priority << OCS_SCSI_PRIORITY_SHIFT) &
1946 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1948 rc = ocs_scsi_send_nodata_io(node, io, ccb_h->target_lun,
1949 ccb->ccb_h.flags & CAM_CDB_POINTER ?
1950 csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
1951 csio->cdb_len,
1955 rc = ocs_scsi_send_rd_io(node, io, ccb_h->target_lun,
1956 ccb->ccb_h.flags & CAM_CDB_POINTER ?
1957 csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
1958 csio->cdb_len,
1960 sgl, sgl_count, csio->dxfer_len,
1964 rc = ocs_scsi_send_wr_io(node, io, ccb_h->target_lun,
1965 ccb->ccb_h.flags & CAM_CDB_POINTER ?
1966 csio->cdb_io.cdb_ptr: csio->cdb_io.cdb_bytes,
1967 csio->cdb_len,
1969 sgl, sgl_count, csio->dxfer_len,
1974 ccb->ccb_h.flags);
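/*
 * Apply a requested initiator/target role change: update the enable flags
 * on the physical port or vport, then bounce the transport offline/online
 * (or delete/recreate the vport) so the new role takes effect on the wire.
 */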
1986 ocs_vport_spec_t *vport = fcp->vport;
1988 for (was = 0, i = 0; i < (ocs->num_vports + 1); i++) {
1989 if (FCPORT(ocs, i)->role != KNOB_ROLE_NONE)
1995 fcp->role = new_role;
1997 ocs->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
1998 ocs->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
2000 vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
2001 vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
2004 rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE);
2009 rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
2017 if ((fcp->role != KNOB_ROLE_NONE)){
2018 fcp->role = new_role;
2019 vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
2020 vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
2022 return ocs_sport_vport_del(ocs, ocs->domain, vport->wwpn, vport->wwnn);
2025 fcp->role = new_role;
2027 vport->enable_ini = (new_role & KNOB_ROLE_INITIATOR)? 1:0;
2028 vport->enable_tgt = (new_role & KNOB_ROLE_TARGET)? 1:0;
2030 if (fcp->role != KNOB_ROLE_NONE) {
2031 return ocs_sport_vport_alloc(ocs->domain, vport);
2048 * - populate path inquiry data via info retrieved from SLI port
2054 struct ccb_hdr *ccb_h = &ccb->ccb_h;
2059 switch (ccb_h->func_code) {
2062 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2063 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2064 ccb->ccb_h.status = CAM_REQ_INVALID;
2076 cam_freeze_devq(ccb->ccb_h.path);
2077 cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0);
2078 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2083 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2094 struct ccb_pathinq *cpi = &ccb->cpi;
2095 struct ccb_pathinq_settings_fc *fc = &cpi->xport_specific.fc;
2101 cpi->version_num = 1;
2103 cpi->protocol = PROTO_SCSI;
2104 cpi->protocol_version = SCSI_REV_SPC;
2106 if (ocs->ocs_xport == OCS_XPORT_FC) {
2107 cpi->transport = XPORT_FC;
2109 cpi->transport = XPORT_UNKNOWN;
2112 cpi->transport_version = 0;
2115 ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value);
2116 fc->bitrate = value.value * 1000; /* speed in Mbps */
2119 fc->wwpn = be64toh(wwn);
2122 fc->wwnn = be64toh(wwn);
2124 fc->port = fcp->fc_id;
2126 if (ocs->config_tgt) {
2127 cpi->target_sprt =
2131 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2132 cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2134 cpi->hba_inquiry = PI_TAG_ABLE;
2135 cpi->max_target = OCS_MAX_TARGETS;
2136 cpi->initiator_id = ocs->max_remote_nodes + 1;
2138 if (!ocs->enable_ini) {
2139 cpi->hba_misc |= PIM_NOINITIATOR;
2142 cpi->max_lun = OCS_MAX_LUN;
2143 cpi->bus_id = cam_sim_bus(sim);
2147 cpi->base_transfer_speed = 1 * 1000 * 1000;
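/*
 * Largest single transfer CAM may issue: one page per SGL entry, with one
 * entry held in reserve (e.g. to cover a first page that is not
 * page-aligned).
 */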
2152 cpi->maxio = PAGE_SIZE *
2153 (ocs_scsi_get_property(ocs, OCS_SCSI_MAX_SGL) - 1);
2155 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2156 strncpy(cpi->hba_vid, "Emulex", HBA_IDLEN);
2157 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2158 cpi->unit_number = cam_sim_unit(sim);
2160 cpi->ccb_h.status = CAM_REQ_CMP;
2166 struct ccb_trans_settings *cts = &ccb->cts;
2167 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
2168 struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc;
2173 if (ocs->ocs_xport != OCS_XPORT_FC) {
2179 if (cts->ccb_h.target_id > OCS_MAX_TARGETS) {
2185 tgt = &fcp->tgt[cts->ccb_h.target_id];
2186 if (tgt->state == OCS_TGT_STATE_NONE) {
2192 cts->protocol = PROTO_SCSI;
2193 cts->protocol_version = SCSI_REV_SPC2;
2194 cts->transport = XPORT_FC;
2195 cts->transport_version = 2;
2197 scsi->valid = CTS_SCSI_VALID_TQ;
2198 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2201 ocs_xport_status(ocs->xport, OCS_XPORT_LINK_SPEED, &value);
2202 fc->bitrate = value.value * 100;
2204 fc->wwpn = tgt->wwpn;
2206 fc->wwnn = tgt->wwnn;
2208 fc->port = tgt->port_id;
2210 fc->valid = CTS_FC_VALID_SPEED |
2225 cam_calc_geometry(&ccb->ccg, TRUE);
2231 struct ccb_sim_knob *knob = &ccb->knob;
2235 if (ocs->ocs_xport != OCS_XPORT_FC) {
2244 knob->xport_specific.fc.wwnn = be64toh(wwn);
2248 knob->xport_specific.fc.wwpn = be64toh(wwn);
2250 knob->xport_specific.fc.wwnn = fcp->vport->wwnn;
2251 knob->xport_specific.fc.wwpn = fcp->vport->wwpn;
2254 knob->xport_specific.fc.role = fcp->role;
2255 knob->xport_specific.fc.valid = KNOB_VALID_ADDRESS |
2264 struct ccb_sim_knob *knob = &ccb->knob;
2268 if (ocs->ocs_xport != OCS_XPORT_FC) {
2274 if (knob->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2275 device_printf(ocs->dev,
2278 (unsigned long long)knob->xport_specific.fc.wwnn,
2279 (unsigned long long)knob->xport_specific.fc.wwpn);
2282 if (knob->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2283 switch (knob->xport_specific.fc.role) {
2285 if (fcp->role != KNOB_ROLE_NONE) {
2290 if (fcp->role != KNOB_ROLE_TARGET) {
2295 if (fcp->role != KNOB_ROLE_INITIATOR) {
2300 if (fcp->role != KNOB_ROLE_BOTH) {
2305 device_printf(ocs->dev,
2307 __func__, knob->xport_specific.fc.role);
2311 device_printf(ocs->dev,
2313 bus, fcp->role, knob->xport_specific.fc.role);
2315 ocs_fcp_change_role(ocs, fcp, knob->xport_specific.fc.role);
2327 union ccb *accb = ccb->cab.abort_ccb;
2329 switch (accb->ccb_h.func_code) {
2339 ccb->ccb_h.status = CAM_UA_ABORT;
2341 ccb->ccb_h.status = CAM_REQ_CMP;
2347 accb->ccb_h.func_code);
2348 ccb->ccb_h.status = CAM_REQ_INVALID;
2354 if (ocs_xport_control(ocs->xport, OCS_XPORT_PORT_OFFLINE) == 0) {
2355 rc = ocs_xport_control(ocs->xport, OCS_XPORT_PORT_ONLINE);
2373 node = ocs_node_get_instance(ocs, fcp->tgt[ccb_h->target_id].node_id);
2375 device_printf(ocs->dev, "%s: no device %d\n",
2376 __func__, ccb_h->target_id);
2384 device_printf(ocs->dev, "%s: unable to alloc IO\n",
2391 rc = ocs_scsi_send_tmf(node, io, NULL, ccb_h->target_lun,
2402 if (node->fcp2device) {
2403 ocs_reset_crn(node, ccb_h->target_lun);
2415 device_printf(ocs->dev, "XPT_EN_LUN %sable %d:%d\n",
2416 ccb->cel.enable ? "en" : "dis",
2417 ccb->ccb_h.target_id,
2418 (unsigned int)ccb->ccb_h.target_lun);
2420 trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status);
2422 trsrc->enabled = ccb->cel.enable;
2425 if (trsrc->enabled == FALSE) {
2428 STAILQ_INIT(&trsrc->atio);
2429 STAILQ_INIT(&trsrc->inot);
2440 * - CAM supplies a number of CCBs to the driver used for received
2442 * - when the driver receives a command, it copies the relevant
2444 * - after the target server processes the request, it creates
2447 * - the driver processes the "continue IO" (a.k.a CTIO) CCB
2448 * - once the IO completes, the driver returns the CTIO to the CAM
2460 /*printf("XPT_%s %p\n", ccb_h->func_code == XPT_ACCEPT_TARGET_IO ?
2462 trsrc = ocs_tgt_resource_get(fcp, &ccb->ccb_h, &status);
2469 if (XPT_ACCEPT_TARGET_IO == ccb->ccb_h.func_code) {
2473 atio->init_id = 0x0badbeef;
2474 atio->tag_id = 0xdeadc0de;
2476 STAILQ_INSERT_TAIL(&trsrc->atio, &ccb->ccb_h,
2479 STAILQ_INSERT_TAIL(&trsrc->inot, &ccb->ccb_h,
2482 ccb->ccb_h.ccb_io_ptr = NULL;
2483 ccb->ccb_h.ccb_ocs_ptr = ocs;
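/*
 * Illustrative sketch (not part of this driver) of the ATIO/CTIO exchange
 * described in the block comment above this function, as a target-server
 * peripheral would drive it.  The function and parameter names here are
 * hypothetical; the func codes, CCB fields and xpt_setup_ccb()/xpt_action()
 * calls are the standard CAM target-mode interface serviced by the
 * XPT_ACCEPT_TARGET_IO and XPT_CONT_TARGET_IO cases in this file.
 */
#if 0
static void
example_target_exchange(struct cam_path *path, struct ccb_accept_tio *atio)
{
	union ccb *ctio;

	/* 1. Queue an empty ATIO with the SIM; it is completed (xpt_done())
	 *    with CAM_CDB_RECVD once a SCSI command arrives. */
	xpt_setup_ccb(&atio->ccb_h, path, CAM_PRIORITY_NORMAL);
	atio->ccb_h.func_code = XPT_ACCEPT_TARGET_IO;
	xpt_action((union ccb *)atio);

	/* 2. After the ATIO completes, answer the command with a CTIO that
	 *    carries data and/or status; tag_id ties it back to the ATIO. */
	ctio = xpt_alloc_ccb();
	xpt_setup_ccb(&ctio->ccb_h, path, CAM_PRIORITY_NORMAL);
	ctio->ccb_h.func_code = XPT_CONT_TARGET_IO;
	ctio->ccb_h.flags = CAM_DIR_NONE | CAM_SEND_STATUS;
	ctio->csio.tag_id = atio->tag_id;
	ctio->csio.init_id = atio->init_id;
	ctio->csio.scsi_status = SCSI_STATUS_OK;
	xpt_action(ctio);	/* serviced by the XPT_CONT_TARGET_IO path above */

	/* Both CCBs complete asynchronously via the peripheral's done routine. */
}
#endif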
2499 io = ocs_scsi_find_io(ocs, ccb->cna2.tag_id);
2501 device_printf(ocs->dev,
2503 __func__, ccb->cna2.tag_id);
2509 abortio = io->tgt_io.app;
2511 abortio->tgt_io.flags &= ~OCS_CAM_IO_F_ABORT_NOTIFY;
2512 device_printf(ocs->dev,
2514 " flags=%#x\n", __func__, abortio->tgt_io.state,
2515 abortio->tag, abortio->init_task_tag,
2516 abortio->tgt_io.flags);
2530 device_printf(ocs->dev,
2532 ccb->ccb_h.flags, ccb->csio.tag_id);
2537 device_printf(ocs->dev, "unhandled func_code = %#x\n",
2538 ccb_h->func_code);
2539 ccb_h->status = CAM_REQ_INVALID;
2569 if (rsp->response_data_length == 0) {
2570 ocs_log_test(io->ocs, "check response without data?!?\n");
2571 rc = -1;
2575 if (rsp->response_data[3] != 0) {
2576 ocs_log_test(io->ocs, "TMF status %08x\n",
2577 be32toh(*((uint32_t *)rsp->response_data)));
2578 rc = -1;
2583 ocs_log_test(io->ocs, "status=%#x\n", scsi_status);
2584 rc = -1;
2596 * - wildcard target ID + LU
2597 * - 0 target ID + non-wildcard LU
2609 target_id_t tid = ccb_h->target_id;
2610 lun_id_t lun = ccb_h->target_lun;
2617 return &fcp->targ_rsrc_wildcard;
2620 return &fcp->targ_rsrc[lun];
2635 ccb = (union ccb *)STAILQ_FIRST(&trsrc->atio);
2637 STAILQ_REMOVE_HEAD(&trsrc->atio, sim_links.stqe);
2638 ccb->ccb_h.status = CAM_REQ_ABORTED;
2644 ccb = (union ccb *)STAILQ_FIRST(&trsrc->inot);
2646 STAILQ_REMOVE_HEAD(&trsrc->inot, sim_links.stqe);
2647 ccb->ccb_h.status = CAM_REQ_ABORTED;
2663 union ccb *accb = ccb->cab.abort_ccb;
2665 int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path));
2668 trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status);
2670 STAILQ_FOREACH(cur, &trsrc->atio, sim_links.stqe) {
2671 if (cur != &accb->ccb_h)
2674 STAILQ_REMOVE(&trsrc->atio, cur, ccb_hdr,
2676 accb->ccb_h.status = CAM_REQ_ABORTED;
2687 aio = accb->ccb_h.ccb_io_ptr;
2689 ccb->ccb_h.status = CAM_UA_ABORT;
2693 device_printf(ocs->dev,
2696 aio->tgt_io.state, aio->tag,
2697 aio->init_task_tag, aio->tgt_io.flags);
2699 * - abort task was received
2700 * - already aborted IO in the DEVICE
2701 * - already received NOTIFY ACKNOWLEDGE */
2703 if ((aio->tgt_io.flags & OCS_CAM_IO_F_ABORT_RECV) == 0) {
2704 device_printf(ocs->dev, "%s: abort not received or io completed \n", __func__);
2709 aio->tgt_io.flags |= OCS_CAM_IO_F_ABORT_CAM;
2722 union ccb *accb = ccb->cab.abort_ccb;
2724 int bus = cam_sim_bus(xpt_path_sim((ccb)->ccb_h.path));
2727 trsrc = ocs_tgt_resource_get(fcp, &accb->ccb_h, &status);
2729 STAILQ_FOREACH(cur, &trsrc->inot, sim_links.stqe) {
2730 if (cur != &accb->ccb_h)
2733 STAILQ_REMOVE(&trsrc->inot, cur, ccb_hdr,
2735 accb->ccb_h.status = CAM_REQ_ABORTED;
2753 struct ccb_scsiio *csio = &accb->csio;
2755 ocs_fcport *fcp = FCPORT(ocs, cam_sim_bus(xpt_path_sim((accb)->ccb_h.path)));
2756 node = ocs_node_get_instance(ocs, fcp->tgt[accb->ccb_h.target_id].node_id);
2758 device_printf(ocs->dev, "%s: no device %d\n",
2759 __func__, accb->ccb_h.target_id);
2762 return (-1);
2767 device_printf(ocs->dev,
2771 return (-1);
2775 (ocs_io_t *)csio->ccb_h.ccb_io_ptr,
2776 accb->ccb_h.target_lun,
2838 switch (io->tgt_io.state) {
2858 if (io->tgt_io.app) {
2860 ((union ccb *)(io->tgt_io.app))->ccb_h.flags);
2862 ((union ccb *)(io->tgt_io.app))->ccb_h.status);
2877 return -1;
2887 lcrn = node->ini_node.lun_crn[idx];
2890 lcrn = ocs_malloc(node->ocs, sizeof(struct ocs_lun_crn),
2896 lcrn->lun = lun;
2897 node->ini_node.lun_crn[idx] = lcrn;
2900 if (lcrn->lun != lun) {
2904 if (lcrn->crnseed == 0)
2905 lcrn->crnseed = 1;
2907 *crn = lcrn->crnseed++;
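/*
 * Command reference numbers are issued per LUN and must never be zero, so
 * the seed is re-primed to 1 above before being handed out and then
 * post-incremented.
 */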
2918 lcrn = node->ini_node.lun_crn[i];
2920 ocs_free(node->ocs, lcrn, sizeof(*lcrn));
2934 lcrn = node->ini_node.lun_crn[idx];
2936 lcrn->crnseed = 0;