Lines matching defs:fc_req (fc_req references in SPDK's NVMe-oF FC transport)

232 nvmf_fc_record_req_trace_point(struct spdk_nvmf_fc_request *fc_req,
292 spdk_trace_record(tpoint_id, fc_req->poller_lcore, 0,
293 (uint64_t)(&fc_req->req));
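nvmf_fc_record_req_trace_point() maps a request state to a trace point ID and records it against the request's poller core, with the address of the embedded spdk_nvmf_request as the object ID. A minimal standalone sketch of that state-to-tpoint mapping, with hypothetical tpoint values (SPDK's real IDs live in its trace registration code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum req_state { REQ_INIT, REQ_WRITE_XFER, REQ_SUCCESS };

static void
record_req_trace_point(enum req_state state, uint16_t lcore, uint64_t obj_id)
{
        uint16_t tpoint_id;

        switch (state) {
        case REQ_INIT:       tpoint_id = 0x10; break;  /* hypothetical IDs */
        case REQ_WRITE_XFER: tpoint_id = 0x11; break;
        case REQ_SUCCESS:    tpoint_id = 0x12; break;
        default:             return;                   /* state not traced */
        }
        /* stand-in for spdk_trace_record(tpoint_id, lcore, 0, obj_id) */
        printf("tpoint=%#x lcore=%u obj=0x%" PRIx64 "\n",
               (unsigned)tpoint_id, (unsigned)lcore, obj_id);
}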
359 struct spdk_nvmf_fc_request *fc_req;
371 fc_req = (struct spdk_nvmf_fc_request *)pooled_req;
372 memset(fc_req, 0, sizeof(struct spdk_nvmf_fc_request));
373 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_INIT);
375 TAILQ_INSERT_TAIL(&hwqp->in_use_reqs, fc_req, link);
376 TAILQ_INSERT_TAIL(&fc_conn->in_use_reqs, fc_req, conn_link);
377 TAILQ_INIT(&fc_req->abort_cbs);
378 return fc_req;
382 nvmf_fc_conn_free_fc_request(struct spdk_nvmf_fc_conn *fc_conn, struct spdk_nvmf_fc_request *fc_req)
384 if (fc_req->state != SPDK_NVMF_FC_REQ_SUCCESS) {
386 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_FAILED);
390 fc_req->magic = 0xDEADBEEF;
392 TAILQ_REMOVE(&fc_conn->hwqp->in_use_reqs, fc_req, link);
393 TAILQ_REMOVE(&fc_conn->in_use_reqs, fc_req, conn_link);
395 STAILQ_INSERT_HEAD(&fc_conn->pool_queue, (struct spdk_nvmf_fc_pooled_request *)fc_req, pool_link);
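nvmf_fc_conn_alloc_fc_request()/nvmf_fc_conn_free_fc_request() above form a fixed request pool: free requests sit on a per-connection STAILQ, active ones are tracked on TAILQ in-use lists (per hwqp and per connection), each allocation is zeroed, and each free poisons a magic field. A minimal sketch of the same pattern, assuming only <sys/queue.h> and a single in-use list instead of SPDK's two:

#include <stddef.h>
#include <string.h>
#include <sys/queue.h>

struct fc_request {
        unsigned int magic;                  /* poisoned on free */
        TAILQ_ENTRY(fc_request) link;        /* in-use list linkage */
        STAILQ_ENTRY(fc_request) pool_link;  /* free-pool linkage */
};

struct fc_conn {
        TAILQ_HEAD(, fc_request) in_use_reqs;
        STAILQ_HEAD(, fc_request) pool_queue;
};

static struct fc_request *
conn_alloc_request(struct fc_conn *conn)
{
        struct fc_request *req = STAILQ_FIRST(&conn->pool_queue);

        if (req == NULL) {
                return NULL;                         /* pool exhausted */
        }
        STAILQ_REMOVE_HEAD(&conn->pool_queue, pool_link);
        memset(req, 0, sizeof(*req));                /* fresh state for reuse */
        TAILQ_INSERT_TAIL(&conn->in_use_reqs, req, link);
        return req;
}

static void
conn_free_request(struct fc_conn *conn, struct fc_request *req)
{
        req->magic = 0xDEADBEEF;                     /* catch use-after-free */
        TAILQ_REMOVE(&conn->in_use_reqs, req, link);
        STAILQ_INSERT_HEAD(&conn->pool_queue, req, pool_link);
}

Both paths are O(1) and allocation-free, which is the point: nothing on the I/O fast path calls malloc().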
400 nvmf_fc_request_remove_from_pending(struct spdk_nvmf_fc_request *fc_req)
402 STAILQ_REMOVE(&fc_req->hwqp->fgroup->group.pending_buf_queue, &fc_req->req,
1199 struct spdk_nvmf_fc_request *fc_req = arg1;
1200 struct spdk_nvmf_ctrlr *ctrlr = fc_req->req.qpair->ctrlr;
1222 if (ctrlr->aer_req[i] == &fc_req->req) {
1224 nvmf_qpair_free_aer(fc_req->req.qpair);
1233 struct spdk_nvmf_fc_request *fc_req =
1235 struct spdk_nvmf_fc_hwqp *hwqp = fc_req->hwqp;
1239 /* Make a copy of the cb list from fc_req */
1241 TAILQ_SWAP(&abort_cbs, &fc_req->abort_cbs, spdk_nvmf_fc_caller_ctx, link);
1243 SPDK_NOTICELOG("FC Request(%p) in state :%s aborted\n", fc_req,
1244 fc_req_state_strs[fc_req->state]);
1246 _nvmf_fc_request_free(fc_req);
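Before invoking the queued abort callbacks, the completion path detaches the whole list from the request (the TAILQ_SWAP above), so a callback can safely free or re-register against the request. SPDK provides TAILQ_SWAP through its own queue header; plain glibc <sys/queue.h> lacks it, so this sketch of the same detach-then-invoke idea pops entries one at a time:

#include <stdlib.h>
#include <sys/queue.h>

struct caller_ctx {
        void (*cb)(void *arg, int status);
        void *arg;
        TAILQ_ENTRY(caller_ctx) link;
};

TAILQ_HEAD(ctx_list, caller_ctx);

static void
run_abort_callbacks(struct ctx_list *abort_cbs, int status)
{
        struct ctx_list local = TAILQ_HEAD_INITIALIZER(local);
        struct caller_ctx *ctx;

        /* detach everything first... */
        while ((ctx = TAILQ_FIRST(abort_cbs)) != NULL) {
                TAILQ_REMOVE(abort_cbs, ctx, link);
                TAILQ_INSERT_TAIL(&local, ctx, link);
        }
        /* ...then invoke and release */
        while ((ctx = TAILQ_FIRST(&local)) != NULL) {
                TAILQ_REMOVE(&local, ctx, link);
                ctx->cb(ctx->arg, status);
                free(ctx);
        }
}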
1260 nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
1276 TAILQ_INSERT_TAIL(&fc_req->abort_cbs, ctx, link);
1279 if (!fc_req->is_aborted) {
1281 fc_req->hwqp->counters.num_aborted++;
1285 kill_req = nvmf_fc_is_port_dead(fc_req->hwqp);
1286 if (kill_req && nvmf_fc_req_in_xfer(fc_req)) {
1287 fc_req->is_aborted = true;
1292 if (fc_req->is_aborted) {
1297 fc_req->is_aborted = true;
1300 if (fc_req->xchg) {
1301 fc_req->xchg->send_abts = send_abts;
1302 fc_req->xchg->aborted = true;
1305 switch (fc_req->state) {
1308 _nvmf_fc_request_free(fc_req);
1315 spdk_thread_send_msg(fc_req->hwqp->thread,
1316 nvmf_fc_req_bdev_abort, (void *)fc_req);
1325 nvmf_fc_issue_abort(fc_req->hwqp, fc_req->xchg, NULL, NULL);
1330 nvmf_fc_request_remove_from_pending(fc_req);
1333 TAILQ_REMOVE(&fc_req->fc_conn->fused_waiting_queue, fc_req, fused_link);
1342 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_ABORTED);
1343 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1344 (void *)fc_req);
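nvmf_fc_request_abort() is idempotent: every caller gets its callback queued on abort_cbs, but only the first to set is_aborted performs the dispatch, which depends on where the request currently sits in the pipeline. A compact sketch of that dispatch; the states and helpers are illustrative stand-ins, and a dead port additionally forces the kill path seen above:

#include <stdbool.h>

enum req_state { REQ_INIT, REQ_BDEV, REQ_XFER, REQ_PENDING, REQ_ABORTED };

struct fc_request {
        enum req_state state;
        bool is_aborted;
};

/* illustrative stand-ins for the SPDK helpers */
static void request_free(struct fc_request *r)        { r->state = REQ_ABORTED; }
static void bdev_abort(struct fc_request *r)          { (void)r; }
static void issue_abts(struct fc_request *r)          { (void)r; }
static void remove_from_pending(struct fc_request *r) { (void)r; }

static void
request_abort(struct fc_request *req)
{
        if (req->is_aborted) {
                return;                /* abort already in flight; cb queued */
        }
        req->is_aborted = true;        /* later completions become no-ops */

        switch (req->state) {
        case REQ_INIT:                 /* nothing issued yet: free directly */
                request_free(req);
                break;
        case REQ_BDEV:                 /* finishes later on the bdev thread */
                bdev_abort(req);
                break;
        case REQ_XFER:                 /* data on the wire: abort the exchange */
                issue_abts(req);
                break;
        case REQ_PENDING:              /* never started: dequeue and free */
                remove_from_pending(req);
                request_free(req);
                break;
        default:
                break;
        }
}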
1348 nvmf_fc_request_alloc_buffers(struct spdk_nvmf_fc_request *fc_req)
1350 uint32_t length = fc_req->req.length;
1351 struct spdk_nvmf_fc_poll_group *fgroup = fc_req->hwqp->fgroup;
1355 if (spdk_nvmf_request_get_buffers(&fc_req->req, group, transport, length)) {
1363 nvmf_fc_request_execute(struct spdk_nvmf_fc_request *fc_req)
1366 if (!nvmf_fc_use_send_frame(fc_req)) {
1367 fc_req->xchg = nvmf_fc_get_xri(fc_req->hwqp);
1368 if (!fc_req->xchg) {
1369 fc_req->hwqp->counters.no_xchg++;
1374 if (fc_req->req.length) {
1375 if (nvmf_fc_request_alloc_buffers(fc_req) < 0) {
1376 fc_req->hwqp->counters.buf_alloc_err++;
1377 if (fc_req->xchg) {
1378 nvmf_fc_put_xchg(fc_req->hwqp, fc_req->xchg);
1379 fc_req->xchg = NULL;
1385 if (fc_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
1388 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_XFER);
1390 if (nvmf_fc_recv_data(fc_req)) {
1392 fc_req->hwqp->counters.unexpected_err++;
1393 _nvmf_fc_request_free(fc_req);
1398 if (fc_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
1399 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_BDEV);
1401 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_BDEV);
1403 spdk_nvmf_request_exec(&fc_req->req);
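nvmf_fc_request_execute() proceeds in order: reserve an FC exchange (skipped on the send-frame fast path), allocate data buffers when the command carries payload (releasing the exchange again on failure), then branch on transfer direction. A write must pull its data from the host before the command can run; reads and dataless commands execute immediately. A sketch of that branch, with hypothetical helpers standing in for the SPDK calls:

#include <errno.h>
#include <stdint.h>

enum xfer_dir   { XFER_NONE, XFER_HOST_TO_CTRLR, XFER_CTRLR_TO_HOST };
enum exec_state { ST_WRITE_XFER, ST_READ_BDEV, ST_NONE_BDEV };

struct request {
        uint32_t length;
        enum xfer_dir xfer;
        enum exec_state state;
};

/* illustrative stand-ins for the SPDK calls */
static int  alloc_buffers(struct request *r) { (void)r; return 0; }
static int  recv_data(struct request *r)     { (void)r; return 0; }
static void execute(struct request *r)       { (void)r; }

static int
request_execute(struct request *req)
{
        if (req->length != 0 && alloc_buffers(req) != 0) {
                return -ENOMEM;        /* caller parks it on the pending queue */
        }

        if (req->xfer == XFER_HOST_TO_CTRLR) {
                req->state = ST_WRITE_XFER;
                return recv_data(req); /* execute once the write data arrives */
        }

        req->state = (req->xfer == XFER_CTRLR_TO_HOST) ? ST_READ_BDEV
                                                       : ST_NONE_BDEV;
        execute(req);                  /* spdk_nvmf_request_exec() in SPDK */
        return 0;
}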
1410 nvmf_fc_set_vmid_priority(struct spdk_nvmf_fc_request *fc_req,
1431 fc_req->app_id = from_be32(&vhdr->src_vmid);
1436 fc_req->csctl = fchdr->cs_ctl;
1446 struct spdk_nvmf_fc_request *fc_req = NULL;
1526 fc_req = nvmf_fc_conn_alloc_fc_request(fc_conn);
1527 if (fc_req == NULL) {
1531 fc_req->req.length = from_be32(&cmd_iu->data_len);
1532 fc_req->req.qpair = &fc_conn->qpair;
1533 memcpy(&fc_req->cmd, &cmd_iu->cmd, sizeof(union nvmf_h2c_msg));
1534 fc_req->req.cmd = (union nvmf_h2c_msg *)&fc_req->cmd;
1535 fc_req->req.rsp = (union nvmf_c2h_msg *)&fc_req->ersp.rsp;
1536 fc_req->oxid = frame->ox_id;
1537 fc_req->oxid = from_be16(&fc_req->oxid);
1538 fc_req->rpi = fc_conn->rpi;
1539 fc_req->poller_lcore = hwqp->lcore_id;
1540 fc_req->poller_thread = hwqp->thread;
1541 fc_req->hwqp = hwqp;
1542 fc_req->fc_conn = fc_conn;
1543 fc_req->req.xfer = xfer;
1544 fc_req->s_id = s_id;
1545 fc_req->d_id = d_id;
1546 fc_req->csn = from_be32(&cmd_iu->cmnd_seq_num);
1547 nvmf_fc_set_vmid_priority(fc_req, frame);
1549 nvmf_fc_record_req_trace_point(fc_req, SPDK_NVMF_FC_REQ_INIT);
1551 if (!STAILQ_EMPTY(&hwqp->fgroup->group.pending_buf_queue) || nvmf_fc_request_execute(fc_req)) {
1552 STAILQ_INSERT_TAIL(&hwqp->fgroup->group.pending_buf_queue, &fc_req->req, buf_link);
1553 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_PENDING);
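Every multi-byte field copied out of the command IU and frame header here is big-endian on the wire; SPDK's from_be16()/from_be32() (spdk/endian.h) read through a pointer and return host order, which is why ox_id is copied first and then converted in place. Portable sketches of equivalent helpers:

#include <stdint.h>
#include <string.h>

static inline uint16_t
sketch_from_be16(const void *ptr)
{
        uint8_t b[2];

        memcpy(b, ptr, sizeof(b));
        return (uint16_t)(((uint16_t)b[0] << 8) | b[1]);
}

static inline uint32_t
sketch_from_be32(const void *ptr)
{
        uint8_t b[4];

        memcpy(b, ptr, sizeof(b));
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

The STAILQ_EMPTY check at the end also preserves arrival order: if anything is already waiting for buffers, the new request queues behind it rather than executing ahead of it.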
1564 _nvmf_fc_request_free(struct spdk_nvmf_fc_request *fc_req)
1569 if (!fc_req) {
1572 hwqp = fc_req->hwqp;
1574 if (fc_req->xchg) {
1575 nvmf_fc_put_xchg(hwqp, fc_req->xchg);
1576 fc_req->xchg = NULL;
1580 if (fc_req->req.data_from_pool) {
1582 spdk_nvmf_request_free_buffers(&fc_req->req, group,
1585 fc_req->req.iovcnt = 0;
1588 nvmf_fc_conn_free_fc_request(fc_req->fc_conn, fc_req);
1592 nvmf_fc_request_set_state(struct spdk_nvmf_fc_request *fc_req,
1595 assert(fc_req->magic != 0xDEADBEEF);
1598 "FC Request(%p):\n\tState Old:%s New:%s\n", fc_req,
1599 nvmf_fc_request_get_state_str(fc_req->state),
1601 nvmf_fc_record_req_trace_point(fc_req, state);
1602 fc_req->state = state;
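The assert in nvmf_fc_request_set_state() is the other half of the 0xDEADBEEF poison written in the free path: any state transition attempted on a recycled request trips immediately in debug builds. The pattern in isolation:

#include <assert.h>

#define FREED_MAGIC 0xDEADBEEFu

struct fc_request {
        unsigned int magic;
        int state;
};

static void
request_mark_freed(struct fc_request *req)
{
        req->magic = FREED_MAGIC;          /* poison; checked on every transition */
}

static void
request_set_state(struct fc_request *req, int state)
{
        assert(req->magic != FREED_MAGIC); /* freed requests must not transition */
        req->state = state;
}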
1727 struct spdk_nvmf_fc_request *fc_req;
1738 fc_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_fc_request, req);
1739 if (!nvmf_fc_request_execute(fc_req)) {
1741 nvmf_fc_request_remove_from_pending(fc_req);
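This is the retry half of the pending queue: the poller revisits parked requests and removes each one whose execution now succeeds (nvmf_fc_request_execute() returns zero on success). A simplified sketch of the drain loop that stops at the first request still starved, preserving FIFO order; SPDK's actual loop differs in details such as budgeting:

#include <sys/queue.h>

struct request {
        STAILQ_ENTRY(request) buf_link;
};

STAILQ_HEAD(pending_queue, request);

/* hypothetical: returns 0 once buffers/exchanges are available again */
static int try_execute(struct request *r) { (void)r; return 0; }

static void
retry_pending(struct pending_queue *q)
{
        struct request *req;

        while ((req = STAILQ_FIRST(q)) != NULL) {
                if (try_execute(req) != 0) {
                        break;                   /* still starved; stop here */
                }
                STAILQ_REMOVE_HEAD(q, buf_link);
        }
}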
1799 nvmf_fc_handle_rsp(struct spdk_nvmf_fc_request *fc_req)
1802 struct spdk_nvmf_request *req = &fc_req->req;
1814 if (nvmf_fc_send_ersp_required(fc_req, fc_conn->rsp_count,
1815 fc_req->transferred_len)) {
1819 fc_req->ersp.ersp_len = ersp_len;
1822 to_be32(&fc_req->ersp.response_seq_no, fc_conn->rsn);
1826 to_be32(&fc_req->ersp.transferred_data_len, fc_req->transferred_len);
1829 rc = nvmf_fc_xmt_rsp(fc_req, (uint8_t *)&fc_req->ersp,
1833 rc = nvmf_fc_xmt_rsp(fc_req, NULL, 0);
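FC-NVMe permits a completion to be signalled with a short, payload-free response frame instead of the full extended response (ERSP) whose sequence number and transferred-length fields are filled in above; nvmf_fc_send_ersp_required() makes that choice. A sketch of the shape of the decision; the exact SPDK conditions differ, and these three triggers are illustrative:

#include <stdbool.h>
#include <stdint.h>

static bool
ersp_required(uint16_t rsp_count, uint16_t ersp_ratio,
              uint32_t xfer_len, uint32_t expected_len, int status)
{
        if (ersp_ratio != 0 && (rsp_count % ersp_ratio) == 0) {
                return true;   /* periodic full response resyncs SQ head */
        }
        if (status != 0) {
                return true;   /* error status needs the full CQE */
        }
        if (xfer_len != expected_len) {
                return true;   /* a short transfer must be reported */
        }
        return false;
}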
1840 nvmf_fc_send_ersp_required(struct spdk_nvmf_fc_request *fc_req,
1843 struct spdk_nvmf_request *req = &fc_req->req;
1872 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
1875 if (fc_req->is_aborted) {
1877 nvmf_fc_poller_api_func(fc_req->hwqp, SPDK_NVMF_FC_POLLER_API_REQ_ABORT_COMPLETE,
1878 (void *)fc_req);
1882 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_XFER);
1884 rc = nvmf_fc_send_data(fc_req);
1887 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_WRITE_RSP);
1889 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_READ_RSP);
1891 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_NONE_RSP);
1894 rc = nvmf_fc_handle_rsp(fc_req);
1899 _nvmf_fc_request_free(fc_req);
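This completion callback mirrors the execute path: an already-aborted request bypasses the response and goes straight to abort bookkeeping; a successful read transmits its data first, with the response deferred to the transfer-done callback; everything else sends the response directly, and a transmit failure frees the request. In sketch form, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

enum xfer_dir  { XFER_NONE, XFER_HOST_TO_CTRLR, XFER_CTRLR_TO_HOST };
enum cpl_state { ST_READ_XFER, ST_RSP };

struct fc_request {
        bool is_aborted;
        enum xfer_dir xfer;
        uint32_t transferred_len;
        enum cpl_state state;
};

/* illustrative stand-ins for the SPDK calls */
static void abort_complete(struct fc_request *r) { (void)r; }
static int  send_data(struct fc_request *r)      { (void)r; return 0; }
static int  handle_rsp(struct fc_request *r)     { (void)r; return 0; }

static int
request_complete(struct fc_request *req)
{
        if (req->is_aborted) {
                abort_complete(req);   /* response suppressed; cleanup deferred */
                return 0;
        }
        if (req->xfer == XFER_CTRLR_TO_HOST && req->transferred_len > 0) {
                req->state = ST_READ_XFER;
                return send_data(req); /* response follows the data transfer */
        }
        req->state = ST_RSP;
        return handle_rsp(req);
}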
2197 struct spdk_nvmf_fc_request *fc_req = nvmf_fc_get_fc_req(req);
2199 if (!fc_req->is_aborted) {
2200 nvmf_fc_request_set_state(fc_req, SPDK_NVMF_FC_REQ_BDEV_ABORTED);
2201 nvmf_fc_request_abort(fc_req, true, NULL, NULL);
2203 nvmf_fc_request_abort_complete(fc_req);