Lines matching defs:qp (the qp symbol in the nvmf/tcp transport)
40 struct nvmf_tcp_qpair *qp;
65 struct nvmf_qpair qp;
134 #define TQP(qp) ((struct nvmf_tcp_qpair *)(qp))
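The cast idiom behind most of these matches: the generic struct nvmf_qpair (line 65 above) is embedded in the TCP-private struct nvmf_tcp_qpair, and TQP() (line 134) recovers the containing structure from a generic pointer. That is only well defined because the embedded qp is the first member. A minimal userspace sketch of the pattern; every member besides qp is a stand-in:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Generic per-queue-pair state shared by every nvmf transport. */
struct nvmf_qpair {
	bool nq_controller;		/* stand-in member */
};

/*
 * The TCP transport embeds the generic qpair as its FIRST member, so
 * a pointer to the embedded member aliases the containing structure
 * and the TQP() cast is well defined.
 */
struct nvmf_tcp_qpair {
	struct nvmf_qpair qp;		/* must remain the first member */
	int so;				/* stand-in for the socket pointer */
};

#define TQP(qp)	((struct nvmf_tcp_qpair *)(qp))

_Static_assert(offsetof(struct nvmf_tcp_qpair, qp) == 0,
    "TQP() requires qp to be the first member");

int
main(void)
{
	struct nvmf_tcp_qpair tqp = { .qp.nq_controller = true, .so = 3 };
	struct nvmf_qpair *nq = &tqp.qp;	/* generic handle */

	/* The cast recovers the containing TCP qpair. */
	assert((void *)TQP(nq) == (void *)&tqp);
	printf("so = %d\n", TQP(nq)->so);
	return (0);
}
```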
175 tcp_alloc_command_buffer(struct nvmf_tcp_qpair *qp,
182 cb->qp = qp;
265 nvmf_tcp_write_pdu(struct nvmf_tcp_qpair *qp, struct mbuf *m)
267 struct socket *so = qp->so;
270 mbufq_enqueue(&qp->tx_pdus, m);
273 cv_signal(&qp->tx_cv);
278 nvmf_tcp_report_error(struct nvmf_tcp_qpair *qp, uint16_t fes, uint32_t fei,
293 hdr->common.pdu_type = qp->qp.nq_controller ?
302 nvmf_tcp_write_pdu(qp, m);
306 nvmf_tcp_validate_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
323 error = nvmf_tcp_validate_pdu_header(ch, qp->qp.nq_controller,
324 qp->header_digests, qp->data_digests, qp->rxpda, &data_len, &fes,
328 nvmf_tcp_report_error(qp, fes, fei, m, hlen);
338 nvmf_tcp_report_error(qp,
383 nvmf_tcp_save_command_capsule(struct nvmf_tcp_qpair *qp,
392 nc = nvmf_allocate_command(&qp->qp, &cmd->ccsqe, M_WAITOK);
397 nvmf_capsule_received(&qp->qp, nc);
402 nvmf_tcp_save_response_capsule(struct nvmf_tcp_qpair *qp,
411 nc = nvmf_allocate_response(&qp->qp, &rsp->rccqe, M_WAITOK);
421 tcp_purge_command_buffer(&qp->rx_buffers, rsp->rccqe.cid, 0);
422 tcp_purge_command_buffer(&qp->tx_buffers, rsp->rccqe.cid, 0);
424 nvmf_capsule_received(&qp->qp, nc);
434 nvmf_tcp_construct_pdu(struct nvmf_tcp_qpair *qp, void *hdr, size_t hlen,
442 if (qp->header_digests)
446 pdo = roundup(plen, qp->txpda);
449 if (qp->data_digests)
464 if (qp->header_digests)
466 if (qp->data_digests && data_len != 0)
472 if (qp->header_digests) {
487 if (qp->data_digests) {
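The construct_pdu matches above spell out the PDU layout math: an optional 4-byte header digest follows the header, the data offset is rounded up to the peer's PDU data alignment (roundup(plen, qp->txpda)), and an optional 4-byte data digest trails the data; both digests are CRC32C per the NVMe/TCP spec. A sketch of that computation; pdu_layout() is a hypothetical helper, the driver does this inline while building the mbuf chain:

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))  /* sys/param.h */

/*
 * Total PDU length and data offset for an NVMe/TCP PDU: header,
 * optional 4-byte header digest, padding up to the peer's data
 * alignment, data, optional 4-byte data digest.
 */
static uint32_t
pdu_layout(uint32_t hlen, uint32_t data_len, bool hdgst, bool ddgst,
    uint32_t txpda, uint32_t *pdo)
{
	uint32_t plen = hlen;

	if (hdgst)
		plen += sizeof(uint32_t);	/* CRC32C over the header */
	*pdo = 0;
	if (data_len != 0) {
		*pdo = roundup(plen, txpda);	/* PDU data offset */
		plen = *pdo + data_len;
		if (ddgst)
			plen += sizeof(uint32_t);  /* CRC32C over the data */
	}
	return (plen);
}

int
main(void)
{
	uint32_t pdo;
	uint32_t plen = pdu_layout(24, 512, true, true, 8, &pdo);

	/* 24-byte header + 4 digest = 28, padded to 32; 32 + 512 + 4. */
	printf("pdo=%" PRIu32 " plen=%" PRIu32 "\n", pdo, plen);
	return (0);
}
```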
506 nvmf_tcp_next_r2t(struct nvmf_tcp_qpair *qp)
510 mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
511 MPASS(qp->active_ttags < qp->num_ttags);
513 TAILQ_FOREACH(cb, &qp->rx_buffers.head, link) {
515 if (cb->tc->active_r2ts > qp->maxr2t)
520 TAILQ_REMOVE(&qp->rx_buffers.head, cb, link);
528 nvmf_tcp_allocate_ttag(struct nvmf_tcp_qpair *qp,
533 mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
535 ttag = qp->next_ttag;
537 if (qp->open_ttags[ttag] == NULL)
539 if (ttag == qp->num_ttags - 1)
543 MPASS(ttag != qp->next_ttag);
545 if (ttag == qp->num_ttags - 1)
546 qp->next_ttag = 0;
548 qp->next_ttag = ttag + 1;
551 qp->active_ttags++;
552 qp->open_ttags[ttag] = cb;
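nvmf_tcp_allocate_ttag() implements a rotor over the open_ttags table: scan forward from next_ttag for a free slot, wrap at num_ttags - 1, and assert against a full wrap, since the caller guarantees active_ttags < num_ttags. A userspace sketch of the same allocator, with a fixed table size standing in for qp->num_ttags:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_TTAGS	8		/* stands in for qp->num_ttags */

static void *open_ttags[NUM_TTAGS];	/* non-NULL slot: ttag in flight */
static uint16_t next_ttag;		/* rotor: where the next scan starts */
static unsigned active_ttags;

/*
 * Scan forward from the rotor for a free slot, wrapping at the end of
 * the table.  The caller must guarantee a free slot exists
 * (active_ttags < NUM_TTAGS); the assert catches a full wrap, as the
 * driver's MPASS does.
 */
static uint16_t
allocate_ttag(void *cb)
{
	uint16_t ttag = next_ttag;

	for (;;) {
		if (open_ttags[ttag] == NULL)
			break;
		if (ttag == NUM_TTAGS - 1)
			ttag = 0;
		else
			ttag++;
		assert(ttag != next_ttag);
	}

	/* Advance the rotor past the slot just handed out. */
	if (ttag == NUM_TTAGS - 1)
		next_ttag = 0;
	else
		next_ttag = ttag + 1;

	active_ttags++;
	open_ttags[ttag] = cb;
	return (ttag);
}

int
main(void)
{
	int a, b;

	printf("ttag %u\n", allocate_ttag(&a));	/* 0 */
	printf("ttag %u\n", allocate_ttag(&b));	/* 1 */
	return (0);
}
```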
563 tcp_send_r2t(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag,
576 m = nvmf_tcp_construct_pdu(qp, &r2t, sizeof(r2t), NULL, 0);
577 nvmf_tcp_write_pdu(qp, m);
586 nvmf_tcp_send_next_r2t(struct nvmf_tcp_qpair *qp,
591 mtx_assert(&qp->rx_buffers.lock, MA_OWNED);
592 MPASS(qp->open_ttags[cb->ttag] == cb);
595 qp->open_ttags[cb->ttag] = NULL;
596 qp->active_ttags--;
600 ncb = nvmf_tcp_next_r2t(qp);
602 nvmf_tcp_allocate_ttag(qp, ncb);
603 mtx_unlock(&qp->rx_buffers.lock);
604 tcp_send_r2t(qp, ncb->cid, ncb->ttag, ncb->data_offset,
607 mtx_unlock(&qp->rx_buffers.lock);
637 nvmf_tcp_handle_h2c_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
645 if (le32toh(h2c->datal) > qp->maxh2cdata) {
646 nvmf_tcp_report_error(qp,
658 if (ttag >= qp->num_ttags) {
659 nvmf_tcp_report_error(qp,
667 mtx_lock(&qp->rx_buffers.lock);
668 cb = qp->open_ttags[ttag];
670 mtx_unlock(&qp->rx_buffers.lock);
671 nvmf_tcp_report_error(qp,
682 nvmf_tcp_send_next_r2t(qp, cb);
691 mtx_unlock(&qp->rx_buffers.lock);
692 nvmf_tcp_report_error(qp,
703 mtx_unlock(&qp->rx_buffers.lock);
704 nvmf_tcp_report_error(qp,
712 mtx_unlock(&qp->rx_buffers.lock);
713 nvmf_tcp_report_error(qp,
722 mtx_unlock(&qp->rx_buffers.lock);
723 nvmf_tcp_report_error(qp,
733 nvmf_tcp_send_next_r2t(qp, cb);
736 mtx_unlock(&qp->rx_buffers.lock);
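The h2c_data matches above are dominated by a validation ladder: DATAL against the advertised maxh2cdata, the transfer tag against num_ttags, and the open_ttags slot, with each failure answered by nvmf_tcp_report_error() terminating the connection. A sketch of that ordering under made-up limits; report_error() and handle_h2c_data() are illustrative stand-ins:

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_TTAGS	8		/* stands in for qp->num_ttags */
#define MAXH2CDATA	(64 * 1024)	/* stands in for qp->maxh2cdata */

static void *open_ttags[NUM_TTAGS];

/* Stand-in for nvmf_tcp_report_error(): a fatal termination PDU. */
static int
report_error(const char *why)
{
	printf("fatal PDU error: %s\n", why);
	return (-1);
}

/*
 * The checks run in the order the matches above suggest: payload
 * size, tag range, open tag, then (not shown here) offset and length
 * checks against the matching command buffer.
 */
static int
handle_h2c_data(uint32_t datal, uint16_t ttag)
{
	if (datal > MAXH2CDATA)
		return (report_error("DATAL exceeds MAXH2CDATA"));
	if (ttag >= NUM_TTAGS)
		return (report_error("ttag out of range"));
	if (open_ttags[ttag] == NULL)
		return (report_error("ttag not active"));
	/* ... copy data into the command buffer, maybe send next R2T ... */
	return (0);
}

int
main(void)
{
	int buf;

	open_ttags[2] = &buf;
	handle_h2c_data(128, 2);	/* accepted */
	handle_h2c_data(128, 5);	/* rejected: ttag not active */
	return (0);
}
```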
747 nvmf_tcp_handle_c2h_data(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
755 mtx_lock(&qp->rx_buffers.lock);
756 cb = tcp_find_command_buffer(&qp->rx_buffers, c2h->cccid, 0);
758 mtx_unlock(&qp->rx_buffers.lock);
763 nvmf_tcp_report_error(qp,
774 tcp_remove_command_buffer(&qp->rx_buffers, cb);
775 mtx_unlock(&qp->rx_buffers.lock);
783 mtx_unlock(&qp->rx_buffers.lock);
784 nvmf_tcp_report_error(qp,
795 mtx_unlock(&qp->rx_buffers.lock);
796 nvmf_tcp_report_error(qp,
804 mtx_unlock(&qp->rx_buffers.lock);
805 nvmf_tcp_report_error(qp,
814 mtx_unlock(&qp->rx_buffers.lock);
815 nvmf_tcp_report_error(qp,
825 tcp_remove_command_buffer(&qp->rx_buffers, cb);
828 mtx_unlock(&qp->rx_buffers.lock);
841 nc = nvmf_allocate_response(&qp->qp, &cqe, M_WAITOK);
844 nvmf_capsule_received(&qp->qp, nc);
922 tcp_send_h2c_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint16_t ttag,
937 top = nvmf_tcp_construct_pdu(qp, &h2c, sizeof(h2c), m, len);
938 nvmf_tcp_write_pdu(qp, top);
942 nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
950 mtx_lock(&qp->tx_buffers.lock);
951 cb = tcp_find_command_buffer(&qp->tx_buffers, r2t->cccid, 0);
953 mtx_unlock(&qp->tx_buffers.lock);
954 nvmf_tcp_report_error(qp,
964 mtx_unlock(&qp->tx_buffers.lock);
965 nvmf_tcp_report_error(qp,
978 mtx_unlock(&qp->tx_buffers.lock);
979 nvmf_tcp_report_error(qp,
988 tcp_remove_command_buffer(&qp->tx_buffers, cb);
991 mtx_unlock(&qp->tx_buffers.lock);
1001 todo = min(data_len, qp->max_tx_data);
1004 tcp_send_h2c_pdu(qp, r2t->cccid, r2t->ttag, data_offset, m,
1049 nvmf_tcp_dispatch_pdu(struct nvmf_tcp_qpair *qp,
1064 return (nvmf_tcp_save_command_capsule(qp, pdu));
1066 return (nvmf_tcp_save_response_capsule(qp, pdu));
1068 return (nvmf_tcp_handle_h2c_data(qp, pdu));
1070 return (nvmf_tcp_handle_c2h_data(qp, pdu));
1072 return (nvmf_tcp_handle_r2t(qp, pdu));
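nvmf_tcp_dispatch_pdu() fans validated PDUs out to one handler per type. A sketch of the shape of that switch; the type codes are the NVMe/TCP spec values, and the handler bodies are placeholders for the driver's nvmf_tcp_save_*() and nvmf_tcp_handle_*() functions:

```c
#include <stdint.h>
#include <stdio.h>

/* NVMe/TCP PDU type codes, per the NVMe/TCP transport specification. */
enum pdu_type {
	PDU_CAPSULE_CMD		= 0x04,
	PDU_CAPSULE_RESP	= 0x05,
	PDU_H2C_DATA		= 0x06,
	PDU_C2H_DATA		= 0x07,
	PDU_R2T			= 0x09,
};

static int
dispatch_pdu(uint8_t pdu_type)
{
	switch (pdu_type) {
	case PDU_CAPSULE_CMD:
		printf("save command capsule\n");
		return (0);
	case PDU_CAPSULE_RESP:
		printf("save response capsule\n");
		return (0);
	case PDU_H2C_DATA:
		printf("handle host-to-controller data\n");
		return (0);
	case PDU_C2H_DATA:
		printf("handle controller-to-host data\n");
		return (0);
	case PDU_R2T:
		printf("handle ready-to-transfer\n");
		return (0);
	default:
		/* Validation rejects other types before dispatch. */
		return (-1);
	}
}

int
main(void)
{
	return (dispatch_pdu(PDU_R2T));
}
```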
1079 struct nvmf_tcp_qpair *qp = arg;
1080 struct socket *so = qp->so;
1093 while (!qp->rx_shutdown) {
1103 nvmf_qpair_error(&qp->qp, error);
1105 while (!qp->rx_shutdown)
1106 cv_wait(&qp->rx_cv, SOCKBUF_MTX(&so->so_rcv));
1119 cv_wait(&qp->rx_cv, SOCKBUF_MTX(&so->so_rcv));
1183 error = nvmf_tcp_validate_pdu(qp, &pdu);
1187 error = nvmf_tcp_dispatch_pdu(qp, &ch, &pdu);
1202 terror = cv_timedwait(&qp->rx_cv,
1219 tcp_command_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc)
1233 cb = tcp_alloc_command_buffer(qp, &nc->nc_data, 0,
1236 if (nc->nc_send_data && nc->nc_data.io_len <= qp->max_icd) {
1243 mtx_lock(&qp->tx_buffers.lock);
1244 tcp_add_command_buffer(&qp->tx_buffers, cb);
1245 mtx_unlock(&qp->tx_buffers.lock);
1247 mtx_lock(&qp->rx_buffers.lock);
1248 tcp_add_command_buffer(&qp->rx_buffers, cb);
1249 mtx_unlock(&qp->rx_buffers.lock);
1270 top = nvmf_tcp_construct_pdu(qp, &cmd, sizeof(cmd), m, m != NULL ?
1276 tcp_response_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc)
1285 return (nvmf_tcp_construct_pdu(qp, &rsp, sizeof(rsp), NULL, 0));
1289 capsule_to_pdu(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_capsule *tc)
1292 return (tcp_command_pdu(qp, tc));
1294 return (tcp_response_pdu(qp, tc));
1300 struct nvmf_tcp_qpair *qp = arg;
1302 struct socket *so = qp->so;
1309 while (!qp->tx_shutdown) {
1315 nvmf_qpair_error(&qp->qp, error);
1317 while (!qp->tx_shutdown)
1318 cv_wait(&qp->tx_cv, SOCKBUF_MTX(&so->so_snd));
1324 m = mbufq_dequeue(&qp->tx_pdus);
1327 if (STAILQ_EMPTY(&qp->tx_capsules)) {
1328 cv_wait(&qp->tx_cv, SOCKBUF_MTX(&so->so_snd));
1333 tc = STAILQ_FIRST(&qp->tx_capsules);
1334 STAILQ_REMOVE_HEAD(&qp->tx_capsules, link);
1337 n = capsule_to_pdu(qp, tc);
1341 mbufq_enqueue(&qp->tx_pdus, n);
1352 cv_wait(&qp->tx_cv, SOCKBUF_MTX(&so->so_snd));
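Taken together, nvmf_tcp_write_pdu() (enqueue on tx_pdus, signal tx_cv) and the nvmf_tcp_send() loop form a classic condition-variable producer/consumer, with the shutdown flag and post-exit drain mirroring tcp_free_qpair(). A userspace analogue with pthreads standing in for the sockbuf mutex and kthreads:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* A queued PDU; in the driver this is an mbuf chain on qp->tx_pdus. */
struct pdu {
	struct pdu *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t tx_cv = PTHREAD_COND_INITIALIZER;
static struct pdu *tx_head;	/* LIFO for brevity; the driver's mbufq is FIFO */
static bool tx_shutdown;

/* Producer: the analogue of nvmf_tcp_write_pdu(). */
static void
write_pdu(struct pdu *p)
{
	pthread_mutex_lock(&lock);
	p->next = tx_head;
	tx_head = p;
	pthread_cond_signal(&tx_cv);	/* wake the send thread */
	pthread_mutex_unlock(&lock);
}

/* Consumer: the analogue of the nvmf_tcp_send() kthread loop. */
static void *
send_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!tx_shutdown) {
		struct pdu *p = tx_head;

		if (p == NULL) {
			/* Nothing queued: block until a producer signals. */
			pthread_cond_wait(&tx_cv, &lock);
			continue;
		}
		tx_head = p->next;
		pthread_mutex_unlock(&lock);	/* drop the lock to "send" */
		printf("transmit PDU %d\n", p->id);
		free(p);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	struct pdu *p = calloc(1, sizeof(*p));

	p->id = 1;
	pthread_create(&t, NULL, send_thread, NULL);
	write_pdu(p);

	/* Shut down the way tcp_free_qpair() does: flag, signal, wait. */
	pthread_mutex_lock(&lock);
	tx_shutdown = true;
	pthread_cond_signal(&tx_cv);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	/* Like tcp_free_qpair(), drain anything still queued. */
	while (tx_head != NULL) {
		p = tx_head;
		tx_head = p->next;
		free(p);
	}
	return (0);
}
```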
1399 struct nvmf_tcp_qpair *qp = arg;
1402 cv_signal(&qp->rx_cv);
1409 struct nvmf_tcp_qpair *qp = arg;
1412 cv_signal(&qp->tx_cv);
1419 struct nvmf_tcp_qpair *qp;
1455 qp = malloc(sizeof(*qp), M_NVMF_TCP, M_WAITOK | M_ZERO);
1456 qp->so = so;
1457 refcount_init(&qp->refs, 1);
1458 qp->txpda = nvlist_get_number(nvl, "txpda");
1459 qp->rxpda = nvlist_get_number(nvl, "rxpda");
1460 qp->header_digests = nvlist_get_bool(nvl, "header_digests");
1461 qp->data_digests = nvlist_get_bool(nvl, "data_digests");
1462 qp->maxr2t = nvlist_get_number(nvl, "maxr2t");
1464 qp->maxh2cdata = nvlist_get_number(nvl, "maxh2cdata");
1465 qp->max_tx_data = tcp_max_transmit_data;
1467 qp->max_tx_data = min(qp->max_tx_data,
1469 qp->max_icd = nvlist_get_number(nvl, "max_icd");
1474 qp->send_success = !nvlist_get_bool(nvl, "sq_flow_control");
1477 qp->num_ttags = MIN((u_int)UINT16_MAX + 1,
1479 ((uint64_t)qp->maxr2t + 1));
1480 qp->open_ttags = mallocarray(qp->num_ttags,
1481 sizeof(*qp->open_ttags), M_NVMF_TCP, M_WAITOK | M_ZERO);
1484 TAILQ_INIT(&qp->rx_buffers.head);
1485 TAILQ_INIT(&qp->tx_buffers.head);
1486 mtx_init(&qp->rx_buffers.lock, "nvmf/tcp rx buffers", NULL, MTX_DEF);
1487 mtx_init(&qp->tx_buffers.lock, "nvmf/tcp tx buffers", NULL, MTX_DEF);
1489 cv_init(&qp->rx_cv, "-");
1490 cv_init(&qp->tx_cv, "-");
1491 mbufq_init(&qp->tx_pdus, 0);
1492 STAILQ_INIT(&qp->tx_capsules);
1496 soupcall_set(so, SO_RCV, nvmf_soupcall_receive, qp);
1499 soupcall_set(so, SO_SND, nvmf_soupcall_send, qp);
1503 error = kthread_add(nvmf_tcp_receive, qp, NULL, &qp->rx_thread, 0, 0,
1506 tcp_free_qpair(&qp->qp);
1509 error = kthread_add(nvmf_tcp_send, qp, NULL, &qp->tx_thread, 0, 0,
1512 tcp_free_qpair(&qp->qp);
1516 return (&qp->qp);
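The constructor pulls each negotiated parameter out of an nvlist and bounds num_ttags by both maxr2t + 1 and the 16-bit tag space (UINT16_MAX + 1 slots). A userspace sketch with libnv (link with -lnv on FreeBSD), reusing the key names from the listing; the values are made up:

```c
#include <sys/nv.h>

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	nvlist_t *nvl = nvlist_create(0);

	/* Parameters the qpair constructor expects, with dummy values. */
	nvlist_add_number(nvl, "txpda", 4);
	nvlist_add_number(nvl, "rxpda", 4);
	nvlist_add_bool(nvl, "header_digests", true);
	nvlist_add_bool(nvl, "data_digests", true);
	nvlist_add_number(nvl, "maxr2t", 3);

	uint64_t maxr2t = nvlist_get_number(nvl, "maxr2t");

	/* One ttag slot per concurrent R2T, capped at the 16-bit space. */
	unsigned num_ttags = maxr2t + 1 > 65536 ? 65536 :
	    (unsigned)(maxr2t + 1);

	printf("maxr2t=%" PRIu64 " num_ttags=%u hdgst=%d\n",
	    maxr2t, num_ttags, nvlist_get_bool(nvl, "header_digests"));
	nvlist_destroy(nvl);
	return (0);
}
```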
1520 tcp_release_qpair(struct nvmf_tcp_qpair *qp)
1522 if (refcount_release(&qp->refs))
1523 free(qp, M_NVMF_TCP);
1529 struct nvmf_tcp_qpair *qp = TQP(nq);
1532 struct socket *so = qp->so;
1536 qp->tx_shutdown = true;
1537 if (qp->tx_thread != NULL) {
1538 cv_signal(&qp->tx_cv);
1539 mtx_sleep(qp->tx_thread, SOCKBUF_MTX(&so->so_snd), 0,
1546 qp->rx_shutdown = true;
1547 if (qp->rx_thread != NULL) {
1548 cv_signal(&qp->rx_cv);
1549 mtx_sleep(qp->rx_thread, SOCKBUF_MTX(&so->so_rcv), 0,
1555 STAILQ_FOREACH_SAFE(tc, &qp->tx_capsules, link, ntc) {
1559 mbufq_drain(&qp->tx_pdus);
1561 cv_destroy(&qp->tx_cv);
1562 cv_destroy(&qp->rx_cv);
1564 if (qp->open_ttags != NULL) {
1565 for (u_int i = 0; i < qp->num_ttags; i++) {
1566 cb = qp->open_ttags[i];
1573 free(qp->open_ttags, M_NVMF_TCP);
1576 mtx_lock(&qp->rx_buffers.lock);
1577 TAILQ_FOREACH_SAFE(cb, &qp->rx_buffers.head, link, ncb) {
1578 tcp_remove_command_buffer(&qp->rx_buffers, cb);
1579 mtx_unlock(&qp->rx_buffers.lock);
1586 mtx_lock(&qp->rx_buffers.lock);
1588 mtx_destroy(&qp->rx_buffers.lock);
1590 mtx_lock(&qp->tx_buffers.lock);
1591 TAILQ_FOREACH_SAFE(cb, &qp->tx_buffers.head, link, ncb) {
1592 tcp_remove_command_buffer(&qp->tx_buffers, cb);
1593 mtx_unlock(&qp->tx_buffers.lock);
1596 mtx_lock(&qp->tx_buffers.lock);
1598 mtx_destroy(&qp->tx_buffers.lock);
1602 tcp_release_qpair(qp);
1608 struct nvmf_tcp_qpair *qp = TQP(nq);
1615 refcount_acquire(&qp->refs);
1622 struct nvmf_tcp_qpair *qp = TQP(tc->nc.nc_qpair);
1632 tcp_release_qpair(qp);
1646 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair);
1648 struct socket *so = qp->so;
1652 STAILQ_INSERT_TAIL(&qp->tx_capsules, tc, link);
1654 cv_signal(&qp->tx_cv);
1705 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair);
1709 cb = tcp_alloc_command_buffer(qp, io, data_offset, io->io_len,
1721 mtx_lock(&qp->rx_buffers.lock);
1722 if (tc->active_r2ts > qp->maxr2t || qp->active_ttags == qp->num_ttags) {
1726 TAILQ_INSERT_TAIL(&qp->rx_buffers.head, cb, link);
1727 mtx_unlock(&qp->rx_buffers.lock);
1731 nvmf_tcp_allocate_ttag(qp, cb);
1732 mtx_unlock(&qp->rx_buffers.lock);
1734 tcp_send_r2t(qp, nc->nc_sqe.cid, cb->ttag, data_offset, io->io_len);
1773 tcp_send_c2h_pdu(struct nvmf_tcp_qpair *qp, uint16_t cid, uint32_t data_offset,
1789 top = nvmf_tcp_construct_pdu(qp, &c2h, sizeof(c2h), m, len);
1790 nvmf_tcp_write_pdu(qp, top);
1797 struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair);
1803 !qp->qp.nq_controller) {
1830 if (m->m_len > qp->max_tx_data) {
1831 n = m_split(m, qp->max_tx_data, M_WAITOK);
1840 if (todo + n->m_len > qp->max_tx_data) {
1852 tcp_send_c2h_pdu(qp, nc->nc_sqe.cid, data_offset, m, todo,
1853 last_pdu, last_pdu && qp->send_success);
1866 else if (qp->send_success)
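tcp_send_controller_data() carves a response payload into C2HData PDUs of at most qp->max_tx_data bytes (m_split() when a single mbuf is too long), setting LAST_PDU only on the final chunk and SUCCESS only when the queue negotiated it (send_success, i.e. SQ flow control disabled). A sketch of the chunking arithmetic with mbufs abstracted away; send_c2h() is a hypothetical stand-in for tcp_send_c2h_pdu():

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for tcp_send_c2h_pdu(). */
static void
send_c2h(uint32_t off, uint32_t len, bool last_pdu, bool success)
{
	printf("C2H off=%" PRIu32 " len=%" PRIu32 " last=%d success=%d\n",
	    off, len, last_pdu, success);
}

/*
 * Carve data_len bytes into PDUs of at most max_tx_data bytes; only
 * the final PDU carries LAST_PDU, and SUCCESS only if negotiated.
 */
static void
send_controller_data(uint32_t data_len, uint32_t max_tx_data,
    bool send_success)
{
	uint32_t off = 0;

	while (off < data_len) {
		uint32_t todo = data_len - off;
		bool last;

		if (todo > max_tx_data)
			todo = max_tx_data;
		last = (off + todo == data_len);
		send_c2h(off, todo, last, last && send_success);
		off += todo;
	}
}

int
main(void)
{
	/* 10000 bytes in 4096-byte PDUs: 4096 + 4096 + 1808. */
	send_controller_data(10000, 4096, true);
	return (0);
}
```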