Lines Matching defs:pdu
84 struct nvme_tcp_pdu *send_pdu; /* only for error pdu and init pdu */
159 struct nvme_tcp_pdu *pdu;
177 static void nvme_tcp_icresp_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu);
223 memset(tcp_req->pdu, 0, sizeof(struct nvme_tcp_pdu));
308 tcp_req->pdu = &tqpair->send_pdus[i];
344 struct nvme_tcp_pdu *pdu;
364 pdu = TAILQ_FIRST(&tqpair->send_queue);
365 /* Remove the pdu from the send_queue to prevent it from being sent out incorrectly
368 TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
460 struct nvme_tcp_pdu *pdu = cb_arg;
461 struct nvme_tcp_qpair *tqpair = pdu->qpair;
464 TAILQ_REMOVE(&tqpair->send_queue, pdu, tailq);
471 assert(pdu->cb_fn != NULL);
472 pdu->cb_fn(pdu->cb_arg);
476 pdu_write_fail(struct nvme_tcp_pdu *pdu, int status)
478 struct nvme_tcp_qpair *tqpair = pdu->qpair;
482 TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
483 pdu_write_done(pdu, status);
487 pdu_seq_fail(struct nvme_tcp_pdu *pdu, int status)
489 struct nvme_tcp_req *treq = pdu->req;
492 nvme_tcp_cond_schedule_qpair_polling(pdu->qpair);
498 _tcp_write_pdu(struct nvme_tcp_pdu *pdu)
501 struct nvme_tcp_qpair *tqpair = pdu->qpair;
503 pdu->sock_req.iovcnt = nvme_tcp_build_iovs(pdu->iov, SPDK_COUNTOF(pdu->iov), pdu,
506 TAILQ_INSERT_TAIL(&tqpair->send_queue, pdu, tailq);
507 if (spdk_unlikely(mapped_length < pdu->data_len)) {
508 SPDK_ERRLOG("could not map the whole %u bytes (mapped only %u bytes)\n", pdu->data_len,
510 pdu_write_done(pdu, -EINVAL);
513 pdu->sock_req.cb_fn = pdu_write_done;
514 pdu->sock_req.cb_arg = pdu;
516 spdk_sock_writev_async(tqpair->sock, &pdu->sock_req);
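The send path shown at lines 498-516 follows a queue-then-write-then-callback pattern: the PDU is turned into an iovec array, appended to the qpair's send_queue, and handed to the socket layer for asynchronous transmission; pdu_write_done (lines 460-472) later removes it from the queue and invokes the stored completion callback. The sketch below illustrates only that pattern; the tcp_pdu type, the pdu_queue list, and the blocking writev() call are stand-ins for illustration, not SPDK's nvme_tcp_pdu or spdk_sock API.

#include <stdio.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <unistd.h>

/* Illustrative stand-ins, not SPDK's nvme_tcp_pdu / spdk_sock types. */
struct tcp_pdu {
    struct iovec iov[2];            /* header + payload */
    int iovcnt;
    void (*cb_fn)(void *cb_arg);    /* completion callback */
    void *cb_arg;
    TAILQ_ENTRY(tcp_pdu) tailq;
};

TAILQ_HEAD(pdu_queue, tcp_pdu);

static void
pdu_write_done(struct pdu_queue *send_queue, struct tcp_pdu *pdu, int status)
{
    /* The PDU stays on the send queue until the write has completed. */
    TAILQ_REMOVE(send_queue, pdu, tailq);
    if (status != 0) {
        fprintf(stderr, "pdu write failed: %d\n", status);
    }
    if (pdu->cb_fn != NULL) {
        pdu->cb_fn(pdu->cb_arg);
    }
}

static void
write_pdu(int fd, struct pdu_queue *send_queue, struct tcp_pdu *pdu)
{
    TAILQ_INSERT_TAIL(send_queue, pdu, tailq);
    /* SPDK submits this asynchronously via spdk_sock_writev_async(); a
     * blocking writev() keeps the sketch self-contained. */
    ssize_t rc = writev(fd, pdu->iov, pdu->iovcnt);

    pdu_write_done(send_queue, pdu, rc < 0 ? -1 : 0);
}

int
main(void)
{
    struct pdu_queue send_queue = TAILQ_HEAD_INITIALIZER(send_queue);
    char hdr[8] = { 'H', 'D', 'R' };
    char payload[] = "payload\n";
    struct tcp_pdu pdu = {
        .iov = { { hdr, sizeof(hdr) }, { payload, sizeof(payload) - 1 } },
        .iovcnt = 2,
    };

    write_pdu(STDOUT_FILENO, &send_queue, &pdu);
    return 0;
}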
522 struct nvme_tcp_pdu *pdu = ctx;
523 struct nvme_tcp_req *treq = pdu->req;
531 pdu_seq_fail(pdu, status);
535 _tcp_write_pdu(pdu);
539 tcp_write_pdu(struct nvme_tcp_pdu *pdu)
541 struct nvme_tcp_req *treq = pdu->req;
542 struct nvme_tcp_qpair *tqpair = pdu->qpair;
550 pdu->data_len > 0) {
554 tcp_write_pdu_seq_cb, pdu);
559 _tcp_write_pdu(pdu);
565 struct nvme_tcp_pdu *pdu = cb_arg;
567 pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
568 MAKE_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
572 pdu_accel_compute_crc32(struct nvme_tcp_pdu *pdu)
574 struct nvme_tcp_qpair *tqpair = pdu->qpair;
576 struct nvme_request *req = ((struct nvme_tcp_req *)pdu->req)->req;
581 pdu->dif_ctx != NULL ||
582 pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT != 0)) {
592 &pdu->data_digest_crc32,
593 pdu->data_iov, pdu->data_iovcnt, 0,
594 pdu_accel_seq_compute_crc32_done, pdu);
602 pdu_write_fail(pdu, rc);
606 tcp_write_pdu(pdu);
614 struct nvme_tcp_pdu *pdu = cb_arg;
615 struct nvme_tcp_req *treq = pdu->req;
624 pdu_seq_fail(pdu, status);
628 crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
630 MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
632 _tcp_write_pdu(pdu);
636 pdu_compute_crc32(struct nvme_tcp_pdu *pdu)
638 struct nvme_tcp_qpair *tqpair = pdu->qpair;
644 if (pdu->data_len > 0 && g_nvme_tcp_ddgst[pdu->hdr.common.pdu_type] &&
646 if (pdu_accel_compute_crc32(pdu)) {
650 req = ((struct nvme_tcp_req *)pdu->req)->req;
653 nvme_tcp_accel_finish_sequence(tgroup, pdu->req, req->accel_sequence,
654 pdu_compute_crc32_seq_cb, pdu);
658 crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
660 MAKE_DIGEST_WORD(pdu->data_digest, crc32c);
663 tcp_write_pdu(pdu);
668 struct nvme_tcp_pdu *pdu,
675 hlen = pdu->hdr.common.hlen;
676 pdu->cb_fn = cb_fn;
677 pdu->cb_arg = cb_arg;
678 pdu->qpair = tqpair;
681 if (g_nvme_tcp_hdgst[pdu->hdr.common.pdu_type] && tqpair->flags.host_hdgst_enable) {
682 crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
683 MAKE_DIGEST_WORD((uint8_t *)&pdu->hdr.raw[hlen], crc32c);
686 pdu_compute_crc32(pdu);
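nvme_tcp_qpair_write_pdu (lines 668-686) appends the optional header digest directly after the header (at hdr.raw[hlen], line 683) before kicking off data-digest computation, and the data-digest paths store their result with MAKE_DIGEST_WORD (lines 568, 630, 660); the accel completion callbacks additionally apply the final SPDK_CRC32C_XOR inversion themselves (lines 567 and 1362). The sketch below shows where the two CRC32C digest words land in an NVMe/TCP PDU, assuming a toy bitwise CRC32C, no padding between the header digest and the data, and a little-endian host; it is illustrative, not the SPDK implementation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Slow bitwise CRC32C (Castagnoli polynomial, reflected 0x82f63b78), only to
 * keep the sketch self-contained; SPDK uses optimized implementations. */
static uint32_t
crc32c(const void *buf, size_t len)
{
    const uint8_t *p = buf;
    uint32_t crc = 0xffffffffu;

    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++) {
            crc = (crc >> 1) ^ (0x82f63b78u & (0u - (crc & 1)));
        }
    }
    return crc ^ 0xffffffffu;       /* final inversion == SPDK_CRC32C_XOR */
}

/* MAKE_DIGEST_WORD in the listing stores the 32-bit digest as four bytes;
 * wire order is little-endian, so memcpy suffices on a little-endian host. */
static void
make_digest_word(uint8_t *dst, uint32_t crc)
{
    memcpy(dst, &crc, sizeof(crc));
}

int
main(void)
{
    /* A toy PDU: an 8-byte header, the header digest word (HDGST), a
     * 16-byte payload, then the data digest word (DDGST). */
    uint8_t pdu[8 + 4 + 16 + 4] = { 0 };
    const uint32_t hlen = 8, data_len = 16;
    uint32_t hdgst;

    make_digest_word(&pdu[hlen], crc32c(pdu, hlen));             /* HDGST follows the header */
    make_digest_word(&pdu[hlen + 4 + data_len],
                     crc32c(&pdu[hlen + 4], data_len));          /* DDGST follows the data */

    memcpy(&hdgst, &pdu[hlen], sizeof(hdgst));
    printf("header digest = 0x%08" PRIx32 "\n", hdgst);
    return 0;
}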
904 struct nvme_tcp_pdu *pdu;
910 pdu = tcp_req->pdu;
911 pdu->req = tcp_req;
913 capsule_cmd = &pdu->hdr.capsule_cmd;
932 pdu->padding_len = 0;
936 pdu->padding_len = alignment - plen;
950 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
954 return nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_qpair_cmd_send_complete, tcp_req);
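When a command capsule carries in-capsule data, lines 932-936 insert padding so that the payload starts at the controller's required alignment, derived from the CPDA value negotiated in IC_RESP. A minimal sketch of that arithmetic follows; the 72-byte capsule header, the 4-byte digest, and the helper names are assumptions made for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout helper illustrating the padding arithmetic at lines
 * 932-936; names and the 72-byte capsule header are assumptions. */
struct capsule_layout {
    uint32_t pdo;           /* PDU data offset */
    uint32_t padding_len;   /* zero bytes inserted before the data */
};

static struct capsule_layout
capsule_layout(uint32_t hlen, int hdgst_enable, uint8_t cpda)
{
    struct capsule_layout l;
    uint32_t plen = hlen + (hdgst_enable ? 4 : 0);      /* header (+ digest) so far */
    uint32_t alignment = ((uint32_t)cpda + 1) << 2;     /* (CPDA + 1) dwords */

    l.pdo = plen;
    l.padding_len = 0;
    if (cpda != 0 && alignment > plen) {
        l.padding_len = alignment - plen;               /* cf. "alignment - plen" at line 936 */
        l.pdo = alignment;
    }
    return l;
}

int
main(void)
{
    /* 72-byte capsule command header, header digest on, CPDA = 31
     * (128-byte data alignment). */
    struct capsule_layout l = capsule_layout(72, 1, 31);

    printf("pdo=%u padding=%u\n", l.pdo, l.padding_len);   /* pdo=128 padding=52 */
    return 0;
}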
983 spdk_trace_record(TRACE_NVME_TCP_SUBMIT, qpair->id, 0, (uintptr_t)tcp_req->pdu, req->cb_arg,
1033 spdk_trace_record(TRACE_NVME_TCP_COMPLETE, qpair->id, 0, (uintptr_t)tcp_req->pdu, req->cb_arg,
1071 nvme_tcp_qpair_send_h2c_term_req(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1090 copy_len = pdu->hdr.common.hlen;
1096 memcpy((uint8_t *)rsp_pdu->hdr.raw + h2c_term_req_hdr_len, pdu->hdr.raw, copy_len);
1099 /* Contains the header length of the erroneously received pdu */
1122 struct nvme_tcp_pdu *pdu;
1128 pdu = tqpair->recv_pdu;
1130 SPDK_DEBUGLOG(nvme, "pdu type = %d\n", pdu->hdr.common.pdu_type);
1131 if (pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP) {
1133 SPDK_ERRLOG("Already received IC_RESP PDU, and we should reject this pdu=%p\n", pdu);
1138 if (pdu->hdr.common.plen != expected_hlen) {
1148 switch (pdu->hdr.common.pdu_type) {
1151 if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
1155 if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
1161 if (pdu->hdr.common.plen < pdu->hdr.common.pdo) {
1167 if ((pdu->hdr.common.plen <= expected_hlen) ||
1168 (pdu->hdr.common.plen > SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE)) {
1174 if (pdu->hdr.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) {
1178 if (pdu->hdr.common.plen != (expected_hlen + hd_len)) {
1191 if (pdu->hdr.common.hlen != expected_hlen) {
1193 expected_hlen, pdu->hdr.common.hlen);
1208 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
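nvme_tcp_pdu_ch_handle (lines 1122-1208) validates the 8-byte common header before anything else: the header length must match the fixed value for the PDU type, plen must account for the optional header digest and any payload, and a violation is answered with an H2C Termination Request carrying a fatal error status and byte offset. The sketch below condenses those checks; the type codes and fixed header lengths follow the NVMe/TCP spec, the per-type plen rules are simplified, and the names are illustrative rather than SPDK's.

#include <stdint.h>
#include <stdio.h>

/* Condensed version of the common-header checks around lines 1122-1208. */
enum pdu_type {
    PDU_IC_RESP      = 0x01,
    PDU_C2H_TERM_REQ = 0x03,
    PDU_CAPSULE_RESP = 0x05,
    PDU_C2H_DATA     = 0x07,
    PDU_R2T          = 0x09,
};

/* Returns 0 if the common header is sane; -1 means the driver would answer
 * with an H2C Termination Request (fatal error status + error offset). */
static int
check_common_hdr(uint8_t pdu_type, uint8_t hlen, int hdgst_flag, uint32_t plen)
{
    uint32_t expected_hlen, hd_len = hdgst_flag ? 4 : 0;
    int exact_plen = 0;

    switch (pdu_type) {
    case PDU_IC_RESP:      expected_hlen = 128; hd_len = 0; exact_plen = 1; break;
    case PDU_C2H_TERM_REQ: expected_hlen = 24;  hd_len = 0; break;
    case PDU_CAPSULE_RESP: expected_hlen = 24;  exact_plen = 1; break;
    case PDU_C2H_DATA:     expected_hlen = 24;  break;
    case PDU_R2T:          expected_hlen = 24;  exact_plen = 1; break;
    default:
        return -1;                      /* unexpected PDU type on a host */
    }

    if (hlen != expected_hlen) {
        return -1;
    }
    if (exact_plen ? (plen != expected_hlen + hd_len)
                   : (plen < expected_hlen + hd_len)) {
        return -1;
    }
    return 0;
}

int
main(void)
{
    printf("capsule resp: %d\n", check_common_hdr(PDU_CAPSULE_RESP, 24, 1, 28));  /* 0 */
    printf("bad hlen:     %d\n", check_common_hdr(PDU_R2T, 20, 0, 24));           /* -1 */
    return 0;
}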
1236 pdu_seq_fail(treq->pdu, status);
1245 struct nvme_tcp_pdu *pdu, uint32_t *reaped)
1252 tcp_req = pdu->req;
1256 c2h_data = &pdu->hdr.c2h_data;
1257 tcp_req->datao += pdu->data_len;
1300 SPDK_ERRLOG("Error info of pdu(%p): %s\n", c2h_term_req,
1312 struct nvme_tcp_pdu *pdu)
1314 nvme_tcp_c2h_term_req_dump(&pdu->hdr.term_req);
1321 struct nvme_tcp_pdu *pdu;
1324 pdu = tqpair->recv_pdu;
1326 switch (pdu->hdr.common.pdu_type) {
1328 nvme_tcp_c2h_data_payload_handle(tqpair, pdu, reaped);
1333 nvme_tcp_c2h_term_req_payload_handle(tqpair, pdu);
1344 nvme_tcp_req_copy_pdu(struct nvme_tcp_req *treq, struct nvme_tcp_pdu *pdu)
1346 treq->pdu->hdr = pdu->hdr;
1347 treq->pdu->req = treq;
1348 memcpy(treq->pdu->data_digest, pdu->data_digest, sizeof(pdu->data_digest));
1349 memcpy(treq->pdu->data_iov, pdu->data_iov, sizeof(pdu->data_iov[0]) * pdu->data_iovcnt);
1350 treq->pdu->data_iovcnt = pdu->data_iovcnt;
1351 treq->pdu->data_len = pdu->data_len;
1359 struct nvme_tcp_pdu *pdu = treq->pdu;
1362 pdu->data_digest_crc32 ^= SPDK_CRC32C_XOR;
1363 result = MATCH_DIGEST_WORD(pdu->data_digest, pdu->data_digest_crc32);
1371 nvme_tcp_accel_recv_compute_crc32(struct nvme_tcp_req *treq, struct nvme_tcp_pdu *pdu)
1378 /* Only support the limited case where the request has a single c2h pdu */
1380 tqpair->qpair.poll_group == NULL || pdu->dif_ctx != NULL ||
1381 pdu->data_len % SPDK_NVME_TCP_DIGEST_ALIGNMENT != 0 ||
1382 pdu->data_len != req->payload_size)) {
1390 nvme_tcp_req_copy_pdu(treq, pdu);
1392 &treq->pdu->data_digest_crc32,
1393 treq->pdu->data_iov, treq->pdu->data_iovcnt, 0,
1406 nvme_tcp_c2h_data_payload_handle(tqpair, treq->pdu, &dummy);
1416 struct nvme_tcp_pdu *pdu = tqpair->recv_pdu;
1418 struct nvme_tcp_req *tcp_req = pdu->req;
1425 tcp_req->expected_datao += pdu->data_len;
1429 if (pdu->ddgst_enable) {
1432 if (nvme_tcp_accel_recv_compute_crc32(tcp_req, pdu)) {
1436 crc32c = nvme_tcp_pdu_calc_data_digest(pdu);
1438 rc = MATCH_DIGEST_WORD(pdu->data_digest, crc32c);
1440 SPDK_ERRLOG("data digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1441 tcp_req = pdu->req;
1467 struct nvme_tcp_pdu *pdu)
1469 struct spdk_nvme_tcp_ic_resp *ic_resp = &pdu->hdr.ic_resp;
1535 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1539 nvme_tcp_capsule_resp_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu,
1544 struct spdk_nvme_tcp_rsp *capsule_resp = &pdu->hdr.capsule_resp;
1564 /* Receive the pdu again */
1582 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1587 struct nvme_tcp_pdu *pdu)
1589 struct spdk_nvme_tcp_term_req_hdr *c2h_term_req = &pdu->hdr.term_req;
1594 SPDK_ERRLOG("Fatal Error Status(FES) is unknown for c2h_term_req pdu=%p\n", pdu);
1601 nvme_tcp_pdu_set_data(pdu, (uint8_t *)pdu->hdr.raw + c2h_term_req->common.hlen,
1606 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1610 nvme_tcp_c2h_data_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1613 struct spdk_nvme_tcp_c2h_data_hdr *c2h_data = &pdu->hdr.c2h_data;
1679 nvme_tcp_pdu_set_data_buf(pdu, tcp_req->iov, tcp_req->iovcnt,
1681 pdu->req = tcp_req;
1687 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
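nvme_tcp_c2h_data_hdr_handle (lines 1610-1687) looks up the request addressed by the C2H Data PDU, validates the data offset and length against it, and then points the PDU's data iovecs into the request's own buffers via nvme_tcp_pdu_set_data_buf (line 1679), so the payload is received without an extra copy. The sketch below shows the offset/length-to-iovec mapping such a helper has to perform; it is a generic illustration, not SPDK's implementation.

#include <stdint.h>
#include <stdio.h>
#include <sys/uio.h>

/* Map a (data offset, length) window onto the request's scattered buffers.
 * Returns the number of entries written to out[], or -1 if the window does
 * not fit in the request buffers or in out[]. */
static int
map_data_window(const struct iovec *iov, int iovcnt,
                uint32_t offset, uint32_t len,
                struct iovec *out, int out_cnt)
{
    int n = 0;

    for (int i = 0; i < iovcnt && len > 0; i++) {
        uint32_t iov_len = iov[i].iov_len;

        if (offset >= iov_len) {        /* window starts past this entry */
            offset -= iov_len;
            continue;
        }
        if (n == out_cnt) {
            return -1;
        }
        out[n].iov_base = (uint8_t *)iov[i].iov_base + offset;
        out[n].iov_len = iov_len - offset;
        if (out[n].iov_len > len) {
            out[n].iov_len = len;
        }
        len -= out[n].iov_len;
        offset = 0;
        n++;
    }
    return len == 0 ? n : -1;           /* -1: request buffer too small */
}

int
main(void)
{
    uint8_t a[64], b[64];
    struct iovec req_iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    struct iovec out[2];
    /* 96 bytes of C2H data landing at request offset 32 spans both entries. */
    int n = map_data_window(req_iov, 2, 32, 96, out, 2);

    printf("iovcnt=%d first=%zu second=%zu\n", n, out[0].iov_len, out[1].iov_len);
    return 0;
}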
1737 rsp_pdu = tcp_req->pdu;
1788 nvme_tcp_r2t_hdr_handle(struct nvme_tcp_qpair *tqpair, struct nvme_tcp_pdu *pdu)
1791 struct spdk_nvme_tcp_r2t_hdr *r2t = &pdu->hdr.r2t;
1860 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1867 struct nvme_tcp_pdu *pdu;
1873 pdu = tqpair->recv_pdu;
1875 SPDK_DEBUGLOG(nvme, "enter: pdu type =%u\n", pdu->hdr.common.pdu_type);
1877 if (pdu->has_hdgst) {
1878 crc32c = nvme_tcp_pdu_calc_header_digest(pdu);
1879 rc = MATCH_DIGEST_WORD((uint8_t *)pdu->hdr.raw + pdu->hdr.common.hlen, crc32c);
1881 SPDK_ERRLOG("header digest error on tqpair=(%p) with pdu=%p\n", tqpair, pdu);
1883 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1889 switch (pdu->hdr.common.pdu_type) {
1891 nvme_tcp_icresp_handle(tqpair, pdu);
1894 nvme_tcp_capsule_resp_hdr_handle(tqpair, pdu, reaped);
1897 nvme_tcp_c2h_data_hdr_handle(tqpair, pdu);
1901 nvme_tcp_c2h_term_req_hdr_handle(tqpair, pdu);
1904 nvme_tcp_r2t_hdr_handle(tqpair, pdu);
1911 nvme_tcp_qpair_send_h2c_term_req(tqpair, pdu, fes, error_offset);
1921 struct nvme_tcp_pdu *pdu;
1935 pdu = tqpair->recv_pdu;
1939 memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
1942 /* Wait for the pdu common header */
1944 assert(pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr));
1946 sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - pdu->ch_valid_bytes,
1947 (uint8_t *)&pdu->hdr.common + pdu->ch_valid_bytes);
1952 pdu->ch_valid_bytes += rc;
1953 if (pdu->ch_valid_bytes < sizeof(struct spdk_nvme_tcp_common_pdu_hdr)) {
1960 /* Wait for the pdu specific header */
1962 assert(pdu->psh_valid_bytes < pdu->psh_len);
1964 pdu->psh_len - pdu->psh_valid_bytes,
1965 (uint8_t *)&pdu->hdr.raw + sizeof(struct spdk_nvme_tcp_common_pdu_hdr) + pdu->psh_valid_bytes);
1971 pdu->psh_valid_bytes += rc;
1972 if (pdu->psh_valid_bytes < pdu->psh_len) {
1981 if (!pdu->data_len) {
1985 data_len = pdu->data_len;
1987 if (spdk_unlikely((pdu->hdr.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_DATA) &&
1990 pdu->ddgst_enable = true;
1993 rc = nvme_tcp_read_payload_data(tqpair->sock, pdu);
1999 pdu->rw_offset += rc;
2000 if (pdu->rw_offset < data_len) {
2004 assert(pdu->rw_offset == data_len);
2018 memset(pdu, 0, sizeof(struct nvme_tcp_pdu));
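The receive loop at lines 1921-2018 is resumable: ch_valid_bytes, psh_valid_bytes, and rw_offset record how far the driver has gotten through the common header, the PDU-specific header, and the payload respectively, so a short read simply returns to the poller and the next pass continues where it stopped; once a PDU has been fully consumed the state is cleared with memset (line 2018). The sketch below shows the same three-stage progression over a plain file descriptor, assuming the 8-byte common header layout from the NVMe/TCP spec and ignoring digests, padding, and byte-order handling.

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/* Illustrative types; the real driver reads into nvme_tcp_pdu via
 * spdk_sock, not into fixed-size buffers via read(). */
struct common_hdr {
    uint8_t  pdu_type;
    uint8_t  flags;
    uint8_t  hlen;     /* total header length */
    uint8_t  pdo;      /* PDU data offset */
    uint32_t plen;     /* total PDU length */
};

struct recv_pdu {
    struct common_hdr ch;
    uint8_t  hdr[4096];
    uint8_t  payload[65536];
    uint32_t ch_valid_bytes;   /* progress within the common header */
    uint32_t psh_valid_bytes;  /* progress within the PDU-specific header */
    uint32_t rw_offset;        /* progress within the payload */
};

/* Returns 1 when a full PDU has been received, 0 when more data is needed,
 * and -1 on a socket error. */
static int
recv_pdu_step(int fd, struct recv_pdu *p)
{
    ssize_t rc;

    /* 1. Common header. */
    while (p->ch_valid_bytes < sizeof(p->ch)) {
        rc = read(fd, (uint8_t *)&p->ch + p->ch_valid_bytes,
                  sizeof(p->ch) - p->ch_valid_bytes);
        if (rc <= 0) {
            return (rc < 0 && errno == EAGAIN) ? 0 : -1;
        }
        p->ch_valid_bytes += rc;
    }

    /* 2. PDU-specific header: the bytes between the common header and hlen. */
    uint32_t psh_len = p->ch.hlen - sizeof(p->ch);
    while (p->psh_valid_bytes < psh_len) {
        rc = read(fd, p->hdr + p->psh_valid_bytes, psh_len - p->psh_valid_bytes);
        if (rc <= 0) {
            return (rc < 0 && errno == EAGAIN) ? 0 : -1;
        }
        p->psh_valid_bytes += rc;
    }

    /* 3. Payload, if any (digests and padding ignored in this sketch). */
    uint32_t data_len = p->ch.plen - p->ch.hlen;
    while (p->rw_offset < data_len) {
        rc = read(fd, p->payload + p->rw_offset, data_len - p->rw_offset);
        if (rc <= 0) {
            return (rc < 0 && errno == EAGAIN) ? 0 : -1;
        }
        p->rw_offset += rc;
    }
    return 1;   /* caller dispatches the PDU, then memset()s the state */
}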
2185 struct nvme_tcp_pdu *pdu;
2188 pdu = tqpair->send_pdu;
2190 ic_req = &pdu->hdr.ic_req;
2201 nvme_tcp_qpair_write_pdu(tqpair, pdu, nvme_tcp_send_icreq_complete, tqpair);
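Connection setup (lines 2185-2201) sends an ICReq PDU as the very first bytes on the socket; it advertises the PDU format version, the host's data alignment (HPDA), the header/data digest enable bits, and MAXR2T, and the controller's ICResp is later consumed by nvme_tcp_icresp_handle (declared at line 177, handled at lines 1467-1535). The sketch below builds such a 128-byte ICReq per the NVMe/TCP spec; the struct and field names are illustrative rather than SPDK's spdk_nvme_tcp_ic_req, and wire byte order is ignored.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative ICReq layout per the NVMe/TCP spec (128 bytes total). */
struct ic_req {
    /* common header */
    uint8_t  pdu_type;        /* 0x00 = ICReq */
    uint8_t  flags;
    uint8_t  hlen;            /* 128 */
    uint8_t  pdo;             /* 0: no data */
    uint32_t plen;            /* 128 */
    /* ICReq-specific header */
    uint16_t pfv;             /* PDU format version, 0 */
    uint8_t  hpda;            /* host PDU data alignment */
    uint8_t  dgst;            /* bit 0: HDGST enable, bit 1: DDGST enable */
    uint32_t maxr2t;          /* max outstanding R2Ts, 0's based */
    uint8_t  reserved[112];   /* pad the PDU to 128 bytes */
};

static void
build_ic_req(struct ic_req *req, int hdgst, int ddgst, uint32_t maxr2t)
{
    memset(req, 0, sizeof(*req));
    req->pdu_type = 0x00;
    req->hlen = sizeof(*req);
    req->plen = sizeof(*req);
    req->pfv = 0;
    req->hpda = 0;
    req->dgst = (hdgst ? 0x01 : 0) | (ddgst ? 0x02 : 0);
    req->maxr2t = maxr2t;     /* e.g. queue depth - 1 */
}

int
main(void)
{
    struct ic_req req;

    build_ic_req(&req, 1, 1, 0);
    printf("icreq size = %zu bytes\n", sizeof(req));   /* 128 */
    return 0;
}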