Lines Matching defs:mlx5_task

245 static inline void accel_mlx5_task_complete(struct accel_mlx5_task *mlx5_task);
504 accel_mlx5_configure_crypto_umr(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_sge *sge,
510 struct accel_mlx5_qp *qp = mlx5_task->qp;
512 struct spdk_accel_task *task = &mlx5_task->base;
519 rc = accel_mlx5_fill_block_sge(dev, sge->src_sge, &mlx5_task->src, length, &remaining,
552 accel_mlx5_iov_sgl_unwind(&mlx5_task->src, task->s.iovcnt, dt);
563 cattr.xts_iv = task->iv + mlx5_task->num_processed_blocks;
567 cattr.enc_order = mlx5_task->enc_order;
568 cattr.bs_selector = bs_to_bs_selector(mlx5_task->base.block_size);
570 SPDK_ERRLOG("unsupported block size %u\n", mlx5_task->base.block_size);
576 if (!mlx5_task->inplace) {
578 rc = accel_mlx5_fill_block_sge(dev, sge->dst_sge, &mlx5_task->dst, length, &remaining,
611 accel_mlx5_iov_sgl_unwind(&mlx5_task->dst, task->d.iovcnt, dt);
622 accel_mlx5_iov_sgl_unwind(&mlx5_task->src, task->s.iovcnt, dt);
636 mlx5_task, task->block_size, cattr.xts_iv, mlx5_task->enc_order, cattr.tweak_mode, length, mkey,
641 assert((uint32_t)mlx5_task->num_processed_blocks + num_blocks <= UINT16_MAX);
642 mlx5_task->num_processed_blocks += num_blocks;
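
The crypto UMR lines above (563, 641-642) show the per-request XTS tweak bookkeeping: each sub-request's starting tweak is the task IV plus the number of blocks already processed, and the running count is advanced after the UMR is built. A minimal standalone model of that arithmetic, with simplified field names assumed for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Simplified stand-in for the fields used on lines 563 and 641-642. */
    struct crypto_umr_model {
        uint64_t task_iv;               /* tweak for block 0 of the whole task */
        uint16_t num_processed_blocks;  /* blocks already covered by earlier UMRs */
    };

    /* Returns the tweak for the next UMR and advances the running count,
     * mirroring cattr.xts_iv = task->iv + num_processed_blocks. */
    static uint64_t
    next_umr_tweak(struct crypto_umr_model *m, uint32_t num_blocks)
    {
        uint64_t xts_iv = m->task_iv + m->num_processed_blocks;

        assert((uint32_t)m->num_processed_blocks + num_blocks <= UINT16_MAX);
        m->num_processed_blocks += num_blocks;
        return xts_iv;
    }
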
650 accel_mlx5_crypto_task_process(struct accel_mlx5_task *mlx5_task)
654 struct accel_mlx5_qp *qp = mlx5_task->qp;
659 uint16_t num_ops = spdk_min(mlx5_task->num_reqs - mlx5_task->num_completed_reqs,
660 mlx5_task->num_ops);
671 rc = spdk_mlx5_crypto_get_dek_data(mlx5_task->base.crypto_key->priv, dev->dev_ctx->pd, &dek_data);
676 mlx5_task->num_wrs = 0;
678 mlx5_task, mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
680 if (mlx5_task->num_submitted_reqs + i + 1 == mlx5_task->num_reqs) {
682 assert(mlx5_task->num_blocks > mlx5_task->num_submitted_reqs);
683 num_blocks = mlx5_task->num_blocks - mlx5_task->num_processed_blocks;
685 num_blocks = mlx5_task->blocks_per_req;
688 rc = accel_mlx5_configure_crypto_umr(mlx5_task, &sges[i], mlx5_task->mkeys[i]->mkey, num_blocks,
694 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
701 if (mlx5_task->inplace) {
703 mlx5_task->mkeys[i]->mkey, 0, first_rdma_fence);
706 mlx5_task->mkeys[i]->mkey, 0, first_rdma_fence);
714 assert(mlx5_task->num_submitted_reqs < mlx5_task->num_reqs);
715 assert(mlx5_task->num_submitted_reqs < UINT16_MAX);
716 mlx5_task->num_submitted_reqs++;
717 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
721 if (mlx5_task->inplace) {
723 mlx5_task->mkeys[i]->mkey, (uint64_t)mlx5_task, first_rdma_fence | SPDK_MLX5_WQE_CTRL_CE_CQ_UPDATE);
726 mlx5_task->mkeys[i]->mkey, (uint64_t)mlx5_task, first_rdma_fence | SPDK_MLX5_WQE_CTRL_CE_CQ_UPDATE);
733 assert(mlx5_task->num_submitted_reqs < mlx5_task->num_reqs);
734 assert(mlx5_task->num_submitted_reqs < UINT16_MAX);
735 mlx5_task->num_submitted_reqs++;
736 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
738 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
740 if (spdk_unlikely(mlx5_task->num_submitted_reqs == mlx5_task->num_reqs &&
741 mlx5_task->num_blocks > mlx5_task->num_processed_blocks)) {
747 SPDK_DEBUGLOG(accel_mlx5, "task %p, processed %u/%u blocks, add extra req\n", mlx5_task,
748 mlx5_task->num_processed_blocks, mlx5_task->num_blocks);
749 mlx5_task->num_reqs++;
752 SPDK_DEBUGLOG(accel_mlx5, "end, task, %p, reqs: total %u, submitted %u, completed %u\n", mlx5_task,
753 mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
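
Lines 676-738 show the batching convention this process path relies on: every request posts an unsignaled UMR followed by an RDMA read/write, and only the final RDMA of the batch requests a CQE and carries the task pointer as its wr_id, so one completion retires the whole batch. A standalone sketch of that pattern; the post_wr() stub and the flag value are illustrative assumptions, not the module's real API:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CQ_UPDATE_FLAG 0x1u   /* stand-in for SPDK_MLX5_WQE_CTRL_CE_CQ_UPDATE */

    /* Stand-in for posting one WQE to the qp. */
    static void
    post_wr(const char *kind, uint64_t wr_id, uint32_t flags)
    {
        printf("%s wr_id=0x%" PRIx64 " signaled=%d\n", kind, wr_id,
               !!(flags & CQ_UPDATE_FLAG));
    }

    static void
    submit_batch_model(uint64_t task_ptr, uint16_t num_ops)
    {
        for (uint16_t i = 0; i < num_ops; i++) {
            int last = (i + 1 == num_ops);

            post_wr("umr", 0, 0);                         /* UMRs are never signaled */
            post_wr("rdma", last ? task_ptr : 0,          /* only the last RDMA gets a CQE */
                    last ? CQ_UPDATE_FLAG : 0);
        }
    }
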
787 accel_mlx5_crypto_task_init(struct accel_mlx5_task *mlx5_task)
789 struct spdk_accel_task *task = &mlx5_task->base;
790 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
800 if (spdk_unlikely((task->nbytes % mlx5_task->base.block_size != 0) || !crypto_key_ok)) {
803 mlx5_task->base.block_size);
810 assert(src_nbytes / mlx5_task->base.block_size <= UINT16_MAX);
811 mlx5_task->num_blocks = src_nbytes / mlx5_task->base.block_size;
812 accel_mlx5_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt);
815 mlx5_task->inplace = 1;
827 mlx5_task->inplace = 0;
828 accel_mlx5_iov_sgl_init(&mlx5_task->dst, task->d.iovs, task->d.iovcnt);
833 assert(SPDK_CEIL_DIV(mlx5_task->num_blocks, dev->crypto_split_blocks) <= UINT16_MAX);
834 mlx5_task->num_reqs = SPDK_CEIL_DIV(mlx5_task->num_blocks, dev->crypto_split_blocks);
836 mlx5_task->blocks_per_req = spdk_min(mlx5_task->num_blocks, dev->crypto_split_blocks);
842 mlx5_task->num_reqs = SPDK_CEIL_DIV(max_sge_count, ACCEL_MLX5_MAX_SGE);
843 mlx5_task->blocks_per_req = SPDK_CEIL_DIV(mlx5_task->num_blocks, mlx5_task->num_reqs);
845 mlx5_task->num_reqs = 1;
846 mlx5_task->blocks_per_req = mlx5_task->num_blocks;
850 mlx5_task->num_reqs = mlx5_task->num_blocks;
851 mlx5_task->blocks_per_req = 1;
854 if (spdk_unlikely(!accel_mlx5_task_alloc_mkeys(mlx5_task, dev->crypto_mkeys))) {
863 mlx5_task->qp);
870 mlx5_task->num_reqs, mlx5_task->blocks_per_req, mlx5_task->num_blocks, mlx5_task->inplace);
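
Lines 833-851 encode how a crypto task is split into requests. A self-contained model of that decision; the multi-block capability flag and the MAX_SGE_MODEL value are assumptions made for illustration:

    #include <stdint.h>

    #define MAX_SGE_MODEL 16u   /* assumed stand-in for ACCEL_MLX5_MAX_SGE */

    static inline uint32_t ceil_div(uint32_t n, uint32_t d) { return (n + d - 1) / d; }
    static inline uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

    static void
    crypto_split_model(uint32_t num_blocks, uint32_t max_sge_count,
                       int multi_block, uint32_t crypto_split_blocks,
                       uint32_t *num_reqs, uint32_t *blocks_per_req)
    {
        if (!multi_block) {
            /* One block per request (lines 850-851). */
            *num_reqs = num_blocks;
            *blocks_per_req = 1;
        } else if (crypto_split_blocks) {
            /* Fixed number of blocks per request (lines 834-836). */
            *num_reqs = ceil_div(num_blocks, crypto_split_blocks);
            *blocks_per_req = min_u32(num_blocks, crypto_split_blocks);
        } else if (max_sge_count > MAX_SGE_MODEL) {
            /* Split so each request's SG list fits in one WQE (lines 842-843). */
            *num_reqs = ceil_div(max_sge_count, MAX_SGE_MODEL);
            *blocks_per_req = ceil_div(num_blocks, *num_reqs);
        } else {
            /* Whole payload in a single request (lines 845-846). */
            *num_reqs = 1;
            *blocks_per_req = num_blocks;
        }
    }
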
876 accel_mlx5_copy_task_complete(struct accel_mlx5_task *mlx5_task)
878 spdk_accel_task_complete(&mlx5_task->base, 0);
882 accel_mlx5_copy_task_process_one(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_qp *qp,
885 struct spdk_accel_task *task = &mlx5_task->base;
893 assert(mlx5_task->dst.iov->iov_len > mlx5_task->dst.iov_offset);
894 dst_len = mlx5_task->dst.iov->iov_len - mlx5_task->dst.iov_offset;
895 rc = accel_mlx5_fill_block_sge(qp->dev, sge.src_sge, &mlx5_task->src, dst_len, &remaining,
908 rc = accel_mlx5_fill_block_sge(qp->dev, sge.dst_sge, &mlx5_task->dst, dst_len, &remaining,
924 rc = spdk_mlx5_qp_rdma_write(mlx5_task->qp->qp, sge.src_sge, sge.src_sge_count,
936 accel_mlx5_copy_task_process(struct accel_mlx5_task *mlx5_task)
939 struct accel_mlx5_qp *qp = mlx5_task->qp;
944 mlx5_task->num_wrs = 0;
945 assert(mlx5_task->num_reqs > 0);
946 assert(mlx5_task->num_ops > 0);
949 for (i = 0; i < mlx5_task->num_ops - 1; i++) {
950 rc = accel_mlx5_copy_task_process_one(mlx5_task, qp, 0, 0);
954 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
955 mlx5_task->num_submitted_reqs++;
958 rc = accel_mlx5_copy_task_process_one(mlx5_task, qp, (uint64_t)mlx5_task,
963 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
964 mlx5_task->num_submitted_reqs++;
965 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
967 SPDK_DEBUGLOG(accel_mlx5, "end, copy task, %p\n", mlx5_task);
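
Lines 893-924 show that each copy request transfers exactly the unconsumed remainder of the current destination iovec: source SGEs are gathered for that length and posted as one RDMA write, and the loop in lines 949-965 repeats this per request. A simplified standalone model of the chunk-length choice, using a hypothetical iovec-cursor type:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Hypothetical cursor over an iovec array, loosely modelling the
     * accel_mlx5_iov_sgl usage in the lines above. */
    struct iov_cursor {
        const struct iovec *iov;
        size_t iov_offset;   /* bytes already consumed from *iov */
    };

    /* Length of the next copy request: the rest of the current dst iovec
     * (line 894: dst_len = dst.iov->iov_len - dst.iov_offset).  The caller
     * then gathers src SGEs for exactly this many bytes and posts one
     * RDMA write (lines 895-924). */
    static size_t
    next_copy_chunk(const struct iov_cursor *dst)
    {
        return dst->iov->iov_len - dst->iov_offset;
    }
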
1035 accel_mlx5_copy_task_init(struct accel_mlx5_task *mlx5_task)
1037 struct spdk_accel_task *task = &mlx5_task->base;
1038 struct accel_mlx5_qp *qp = mlx5_task->qp;
1042 mlx5_task->num_reqs = task->d.iovcnt;
1044 mlx5_task->num_reqs = SPDK_CEIL_DIV(task->s.iovcnt, ACCEL_MLX5_MAX_SGE);
1046 mlx5_task->num_reqs = accel_mlx5_get_copy_task_count(task->s.iovs, task->s.iovcnt,
1049 mlx5_task->inplace = 0;
1050 accel_mlx5_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt);
1051 accel_mlx5_iov_sgl_init(&mlx5_task->dst, task->d.iovs, task->d.iovcnt);
1052 mlx5_task->num_ops = spdk_min(qp_slot, mlx5_task->num_reqs);
1053 if (spdk_unlikely(!mlx5_task->num_ops)) {
1057 SPDK_DEBUGLOG(accel_mlx5, "copy task num_reqs %u, num_ops %u\n", mlx5_task->num_reqs,
1058 mlx5_task->num_ops);
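
Lines 1052-1053 (together with the nomem handling around line 2195) show that the number of requests a task may keep in flight is clamped to the free qp slots at init time, and that a task with no free slot is queued rather than failed. A tiny worked model of that clamp, with simplified parameters:

    #include <errno.h>
    #include <stdint.h>

    /* Returns 0 and sets *num_ops, or -ENOMEM when the qp has no free slots
     * (the caller then parks the task on the nomem queue). */
    static int
    clamp_num_ops(uint16_t num_reqs, uint16_t free_qp_slots, uint16_t *num_ops)
    {
        *num_ops = num_reqs < free_qp_slots ? num_reqs : free_qp_slots;
        if (*num_ops == 0) {
            return -ENOMEM;
        }
        return 0;
    }
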
1091 accel_mlx5_crc_task_complete(struct accel_mlx5_task *mlx5_task)
1093 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
1095 *mlx5_task->base.crc_dst = mlx5_task->psv->crc ^ UINT32_MAX;
1097 assert(mlx5_task->num_ops);
1098 spdk_mlx5_mkey_pool_put_bulk(dev->sig_mkeys, mlx5_task->mkeys, mlx5_task->num_ops);
1099 spdk_mempool_put(dev->dev_ctx->psv_pool, mlx5_task->psv);
1100 spdk_accel_task_complete(&mlx5_task->base, 0);
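
Lines 1095 and 1110 show the CRC32C bit-inversion convention: the seed handed to the signature UMR is the accel-level seed XORed with UINT32_MAX, and the value read back from the PSV is XORed again before being stored in crc_dst. A short worked example of the round trip; the psv_crc value below is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t accel_seed = 0;                       /* CRC32C seed at the accel API level */
        uint32_t umr_seed = accel_seed ^ UINT32_MAX;   /* what the signature UMR is given (line 1110) */
        uint32_t psv_crc = 0x12345678;                 /* illustrative raw value read from the PSV */
        uint32_t crc_dst = psv_crc ^ UINT32_MAX;       /* what the accel task reports (line 1095) */

        printf("umr_seed=0x%08x crc_dst=0x%08x\n", umr_seed, crc_dst);
        return 0;
    }
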
1104 accel_mlx5_crc_task_configure_umr(struct accel_mlx5_task *mlx5_task, struct ibv_sge *sge,
1110 .seed = mlx5_task->base.seed ^ UINT32_MAX,
1111 .psv_index = mlx5_task->psv->psv_index,
1125 return spdk_mlx5_umr_configure_sig(mlx5_task->qp->qp, &umr_attr, &sattr, 0, 0);
1129 accel_mlx5_crc_task_fill_sge(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_sge *sge)
1131 struct spdk_accel_task *task = &mlx5_task->base;
1132 struct accel_mlx5_qp *qp = mlx5_task->qp;
1137 rc = accel_mlx5_fill_block_sge(dev, sge->src_sge, &mlx5_task->src, task->nbytes, &remaining,
1149 if (!mlx5_task->inplace) {
1150 rc = accel_mlx5_fill_block_sge(dev, sge->dst_sge, &mlx5_task->dst, task->nbytes, &remaining,
1167 accel_mlx5_crc_task_process_one_req(struct accel_mlx5_task *mlx5_task)
1170 struct accel_mlx5_qp *qp = mlx5_task->qp;
1172 uint32_t num_ops = spdk_min(mlx5_task->num_reqs - mlx5_task->num_completed_reqs,
1173 mlx5_task->num_ops);
1185 mlx5_task->num_wrs = 0;
1187 rc = accel_mlx5_crc_task_fill_sge(mlx5_task, &sges);
1191 rc = accel_mlx5_crc_task_configure_umr(mlx5_task, sges.src_sge, sges.src_sge_count,
1192 mlx5_task->mkeys[0], SPDK_MLX5_UMR_SIG_DOMAIN_WIRE, mlx5_task->base.nbytes, true, true);
1197 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1200 if (mlx5_task->inplace) {
1213 sge[sge_count].lkey = mlx5_task->psv->crc_lkey;
1214 sge[sge_count].addr = (uintptr_t)&mlx5_task->psv->crc;
1217 if (spdk_unlikely(mlx5_task->psv->bits.error)) {
1218 rc = spdk_mlx5_qp_set_psv(qp->qp, mlx5_task->psv->psv_index, *mlx5_task->base.crc_dst, 0, 0);
1223 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1226 rc = spdk_mlx5_qp_rdma_read(qp->qp, sge, sge_count, 0, mlx5_task->mkeys[0]->mkey,
1227 (uint64_t)mlx5_task, rdma_fence | SPDK_MLX5_WQE_CTRL_CE_CQ_UPDATE);
1232 mlx5_task->num_submitted_reqs++;
1233 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
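
Lines 1185-1233 outline the single-request CRC path: a signature UMR is configured over the source SGEs, then one RDMA read is posted through the signature mkey with an extra SGE appended that points at the PSV's crc field, so the computed checksum lands in host memory together with the data. A minimal standalone model of appending that extra SGE, using a simplified sge struct rather than ibv_sge:

    #include <stdint.h>

    struct sge_model {
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
    };

    /* Appends the PSV crc entry after the data SGEs, mirroring lines
     * 1213-1215: the RDMA read through the signature mkey then delivers
     * both the payload and the computed CRC in a single WR. */
    static uint16_t
    append_crc_sge(struct sge_model *sge, uint16_t sge_count,
                   uint32_t *psv_crc, uint32_t crc_lkey)
    {
        sge[sge_count].addr = (uintptr_t)psv_crc;
        sge[sge_count].length = sizeof(*psv_crc);
        sge[sge_count].lkey = crc_lkey;
        return sge_count + 1;
    }
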
1318 accel_mlx5_crc_task_process_multi_req(struct accel_mlx5_task *mlx5_task)
1322 struct spdk_accel_task *task = &mlx5_task->base;
1323 struct accel_mlx5_qp *qp = mlx5_task->qp;
1334 uint16_t num_ops = spdk_min(mlx5_task->num_reqs - mlx5_task->num_completed_reqs,
1335 mlx5_task->num_ops);
1344 sig_init = !mlx5_task->num_submitted_reqs;
1356 if (mlx5_task->inplace) {
1357 accel_mlx5_iov_sgl_init(&umr_sgl, mlx5_task->src.iov, mlx5_task->src.iovcnt);
1359 accel_mlx5_iov_sgl_init(&rdma_sgl, mlx5_task->src.iov, mlx5_task->src.iovcnt);
1361 umr_sgl_ptr = &mlx5_task->src;
1362 accel_mlx5_iov_sgl_init(&rdma_sgl, mlx5_task->dst.iov, mlx5_task->dst.iovcnt);
1364 mlx5_task->num_wrs = 0;
1371 assert((mlx5_task->num_completed_reqs + i + 1) == mlx5_task->num_reqs);
1387 mlx5_task->last_umr_len = umr_len[i];
1388 mlx5_task->last_mkey_idx = i;
1391 rc = accel_mlx5_crc_task_configure_umr(mlx5_task, sges, sge_count, mlx5_task->mkeys[i],
1399 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1403 if (spdk_unlikely(mlx5_task->psv->bits.error)) {
1404 rc = spdk_mlx5_qp_set_psv(qp->qp, mlx5_task->psv->psv_index, *mlx5_task->base.crc_dst, 0, 0);
1409 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1413 if (mlx5_task->inplace) {
1414 sge_count = accel_mlx5_fill_block_sge(dev, sges, &mlx5_task->src, umr_len[i], &remaining,
1417 sge_count = accel_mlx5_fill_block_sge(dev, sges, &mlx5_task->dst, umr_len[i], &remaining,
1425 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges, sge_count, 0, mlx5_task->mkeys[i]->mkey,
1431 mlx5_task->num_submitted_reqs++;
1432 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1436 if ((mlx5_task->inplace && mlx5_task->src.iovcnt == 0) || (!mlx5_task->inplace &&
1437 mlx5_task->dst.iovcnt == 0)) {
1443 umr_offset = mlx5_task->last_umr_len;
1446 mlx5_task->last_mkey_idx = i;
1447 if (mlx5_task->inplace) {
1448 sge_count = accel_mlx5_fill_block_sge(dev, sges, &mlx5_task->src, umr_len[i], &remaining,
1451 sge_count = accel_mlx5_fill_block_sge(dev, sges, &mlx5_task->dst, umr_len[i], &remaining,
1461 if ((mlx5_task->num_completed_reqs + i + 1) == mlx5_task->num_reqs) {
1465 sges[sge_count].lkey = mlx5_task->psv->crc_lkey;
1466 sges[sge_count].addr = (uintptr_t)&mlx5_task->psv->crc;
1471 mlx5_task->mkeys[mlx5_task->last_mkey_idx]->mkey,
1472 (uint64_t)mlx5_task, rdma_fence);
1477 mlx5_task->num_submitted_reqs++;
1478 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1485 accel_mlx5_crc_task_process(struct accel_mlx5_task *mlx5_task)
1489 assert(mlx5_task->mlx5_opcode == ACCEL_MLX5_OPC_CRC32C);
1492 mlx5_task, mlx5_task->num_reqs, mlx5_task->num_submitted_reqs, mlx5_task->num_completed_reqs);
1494 if (mlx5_task->num_reqs == 1) {
1495 rc = accel_mlx5_crc_task_process_one_req(mlx5_task);
1497 rc = accel_mlx5_crc_task_process_multi_req(mlx5_task);
1501 STAILQ_INSERT_TAIL(&mlx5_task->qp->in_hw, mlx5_task, link);
1503 mlx5_task, mlx5_task->num_reqs, mlx5_task->num_submitted_reqs,
1504 mlx5_task->num_completed_reqs);
1668 accel_mlx5_crc_task_init(struct accel_mlx5_task *mlx5_task)
1670 struct spdk_accel_task *task = &mlx5_task->base;
1671 struct accel_mlx5_qp *qp = mlx5_task->qp;
1675 accel_mlx5_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt);
1676 if (mlx5_task->inplace) {
1678 mlx5_task->num_reqs = SPDK_CEIL_DIV(mlx5_task->src.iovcnt + 1, ACCEL_MLX5_MAX_SGE);
1680 accel_mlx5_iov_sgl_init(&mlx5_task->dst, task->d.iovs, task->d.iovcnt);
1681 mlx5_task->num_reqs = accel_mlx5_get_crc_task_count(mlx5_task->src.iov, mlx5_task->src.iovcnt,
1682 mlx5_task->dst.iov, mlx5_task->dst.iovcnt);
1685 rc = accel_mlx5_task_alloc_crc_ctx(mlx5_task, qp_slot);
1693 mlx5_task->qp);
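
Line 1678 shows the in-place CRC init reserving one extra SG slot per request, ceil((src.iovcnt + 1) / ACCEL_MLX5_MAX_SGE); the extra slot is most plausibly there so the PSV crc SGE from the previous sketch fits in the same WQE, though that rationale is an inference from the surrounding lines. A one-line worked example under the assumed MAX_SGE value:

    #include <stdio.h>

    #define MAX_SGE_MODEL 16u   /* assumed stand-in for ACCEL_MLX5_MAX_SGE */

    int
    main(void)
    {
        unsigned src_iovcnt = 31;
        /* ceil((31 + 1) / 16) == 2: the extra slot leaves room for the CRC SGE. */
        unsigned num_reqs = (src_iovcnt + 1 + MAX_SGE_MODEL - 1) / MAX_SGE_MODEL;

        printf("num_reqs=%u\n", num_reqs);
        return 0;
    }
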
1701 accel_mlx5_crypto_mkey_task_init(struct accel_mlx5_task *mlx5_task)
1703 struct spdk_accel_task *task = &mlx5_task->base;
1704 struct accel_mlx5_qp *qp = mlx5_task->qp;
1731 if (spdk_unlikely(task->nbytes % mlx5_task->base.block_size != 0)) {
1733 mlx5_task->base.block_size);
1737 num_blocks = task->nbytes / mlx5_task->base.block_size;
1750 accel_mlx5_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt);
1751 mlx5_task->num_blocks = num_blocks;
1752 mlx5_task->num_processed_blocks = 0;
1753 mlx5_task->num_reqs = 1;
1754 mlx5_task->blocks_per_req = num_blocks;
1757 mlx5_task->num_ops = 0;
1761 rc = spdk_mlx5_mkey_pool_get_bulk(dev->crypto_mkeys, mlx5_task->mkeys, 1);
1763 mlx5_task->num_ops = 0;
1767 mlx5_task->num_ops = 1;
1769 SPDK_DEBUGLOG(accel_mlx5, "crypto_mkey task num_blocks %u, src_len %zu\n", mlx5_task->num_reqs,
1776 accel_mlx5_crypto_mkey_task_process(struct accel_mlx5_task *mlx5_task)
1779 struct spdk_accel_task *task = &mlx5_task->base;
1780 struct accel_mlx5_qp *qp = mlx5_task->qp;
1785 if (spdk_unlikely(!mlx5_task->num_ops)) {
1788 SPDK_DEBUGLOG(accel_mlx5, "begin, task %p, dst_domain_ctx %p\n", mlx5_task, task->dst_domain_ctx);
1790 mlx5_task->num_wrs = 0;
1796 rc = accel_mlx5_configure_crypto_umr(mlx5_task, &sge, mlx5_task->mkeys[0]->mkey,
1797 mlx5_task->num_blocks, &dek_data, (uint64_t)mlx5_task, SPDK_MLX5_WQE_CTRL_CE_CQ_UPDATE);
1803 mlx5_task->num_submitted_reqs++;
1804 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1805 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
1807 SPDK_DEBUGLOG(accel_mlx5, "end, task %p, dst_domain_ctx %p\n", mlx5_task, task->dst_domain_ctx);
1838 accel_mlx5_crypto_mkey_task_complete(struct accel_mlx5_task *mlx5_task)
1840 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
1842 assert(mlx5_task->num_ops);
1843 assert(mlx5_task->num_processed_blocks == mlx5_task->num_blocks);
1844 assert(mlx5_task->base.seq);
1846 spdk_mlx5_mkey_pool_put_bulk(dev->crypto_mkeys, mlx5_task->mkeys, 1);
1847 spdk_accel_task_complete(&mlx5_task->base, 0);
1851 accel_mlx5_mkey_task_init(struct accel_mlx5_task *mlx5_task)
1853 struct spdk_accel_task *task = &mlx5_task->base;
1854 struct accel_mlx5_qp *qp = mlx5_task->qp;
1873 accel_mlx5_iov_sgl_init(&mlx5_task->src, task->s.iovs, task->s.iovcnt);
1874 mlx5_task->num_reqs = 1;
1877 mlx5_task->num_ops = 0;
1881 rc = spdk_mlx5_mkey_pool_get_bulk(dev->mkeys, mlx5_task->mkeys, 1);
1883 mlx5_task->num_ops = 0;
1887 mlx5_task->num_ops = 1;
1889 SPDK_DEBUGLOG(accel_mlx5, "crypto_mkey task num_blocks %u, src_len %zu\n", mlx5_task->num_reqs,
1896 accel_mlx5_mkey_task_process(struct accel_mlx5_task *mlx5_task)
1900 struct spdk_accel_task *task = &mlx5_task->base;
1901 struct accel_mlx5_qp *qp = mlx5_task->qp;
1906 if (spdk_unlikely(!mlx5_task->num_ops)) {
1909 SPDK_DEBUGLOG(accel_mlx5, "begin, task %p, dst_domain_ctx %p\n", mlx5_task, task->dst_domain_ctx);
1911 mlx5_task->num_wrs = 0;
1913 rc = accel_mlx5_fill_block_sge(dev, src_sge, &mlx5_task->src, task->nbytes, &remaining,
1920 umr_attr.mkey = mlx5_task->mkeys[0]->mkey;
1925 rc = spdk_mlx5_umr_configure(qp->qp, &umr_attr, (uint64_t)mlx5_task,
1932 mlx5_task->num_submitted_reqs++;
1933 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1934 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
1936 SPDK_DEBUGLOG(accel_mlx5, "end, task %p, dst_domain_ctx %p\n", mlx5_task, task->dst_domain_ctx);
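
Lines 1911-1934 show the mkey task posting a single signaled WR: the whole source payload is gathered into SGEs, a UMR is configured over them with the pooled mkey, and the task pointer rides on that one WR. A minimal standalone model of filling the UMR attributes; the struct below is a simplified assumption, not the real spdk_mlx5_umr_attr definition:

    #include <stdint.h>

    struct sge_model {
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
    };

    /* Simplified stand-in for the UMR attributes filled around line 1920. */
    struct umr_attr_model {
        uint32_t mkey;               /* pooled mkey the payload is mapped behind */
        uint32_t umr_len;            /* total bytes covered, task->nbytes here */
        uint16_t sge_count;
        const struct sge_model *sge;
    };

    static void
    fill_umr_attr_model(struct umr_attr_model *attr, uint32_t mkey,
                        const struct sge_model *sge, uint16_t sge_count, uint32_t nbytes)
    {
        attr->mkey = mkey;
        attr->umr_len = nbytes;
        attr->sge_count = sge_count;
        attr->sge = sge;
    }
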
1967 accel_mlx5_mkey_task_complete(struct accel_mlx5_task *mlx5_task)
1969 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
1971 assert(mlx5_task->num_ops);
1972 assert(mlx5_task->base.seq);
1974 spdk_mlx5_mkey_pool_put_bulk(dev->mkeys, mlx5_task->mkeys, 1);
1975 spdk_accel_task_complete(&mlx5_task->base, 0);
1979 accel_mlx5_task_op_not_implemented(struct accel_mlx5_task *mlx5_task)
1986 accel_mlx5_task_op_not_implemented_v(struct accel_mlx5_task *mlx5_task)
1993 accel_mlx5_task_op_not_supported(struct accel_mlx5_task *mlx5_task)
1995 SPDK_ERRLOG("Unsupported opcode %d\n", mlx5_task->base.op_code);
2130 accel_mlx5_task_init_opcode(struct accel_mlx5_task *mlx5_task)
2132 uint8_t base_opcode = mlx5_task->base.op_code;
2136 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_COPY;
2140 mlx5_task->enc_order = SPDK_MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;
2141 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_CRYPTO;
2145 mlx5_task->enc_order = SPDK_MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;
2146 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_CRYPTO;
2149 mlx5_task->inplace = 1;
2150 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_CRC32C;
2153 mlx5_task->inplace = 0;
2154 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_CRC32C;
2158 mlx5_task->mlx5_opcode = ACCEL_MLX5_OPC_LAST;
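
Lines 2136-2158 map the generic accel opcode onto the module's internal opcode and pre-set the fields that depend only on it (encryption order for the crypto cases, inplace for the two CRC variants). A standalone switch restating that mapping; the internal opcode names come from the lines above, while the generic opcode names and the encrypt/decrypt-to-order pairing are assumptions inferred from the order of the matched lines:

    /* Assumed subset of the generic accel opcodes. */
    enum base_opc { OPC_COPY, OPC_ENCRYPT, OPC_DECRYPT, OPC_CRC32C, OPC_COPY_CRC32C, OPC_OTHER };
    /* Internal opcodes named on lines 2136-2158. */
    enum mlx5_opc { MLX5_OPC_COPY, MLX5_OPC_CRYPTO, MLX5_OPC_CRC32C, MLX5_OPC_LAST };

    struct opc_result {
        enum mlx5_opc mlx5_opcode;
        int encrypt_towards_wire;   /* stand-in for enc_order (RAW_WIRE vs RAW_MEMORY) */
        int inplace;
    };

    static struct opc_result
    init_opcode_model(enum base_opc op)
    {
        struct opc_result r = { MLX5_OPC_LAST, 0, 0 };

        switch (op) {
        case OPC_COPY:                          /* line 2136 */
            r.mlx5_opcode = MLX5_OPC_COPY;
            break;
        case OPC_ENCRYPT:                       /* lines 2140-2141, assumed pairing */
            r.mlx5_opcode = MLX5_OPC_CRYPTO;
            r.encrypt_towards_wire = 1;
            break;
        case OPC_DECRYPT:                       /* lines 2145-2146, assumed pairing */
            r.mlx5_opcode = MLX5_OPC_CRYPTO;
            r.encrypt_towards_wire = 0;
            break;
        case OPC_CRC32C:                        /* lines 2149-2150 */
            r.mlx5_opcode = MLX5_OPC_CRC32C;
            r.inplace = 1;
            break;
        case OPC_COPY_CRC32C:                   /* lines 2153-2154 */
            r.mlx5_opcode = MLX5_OPC_CRC32C;
            r.inplace = 0;
            break;
        default:                                /* line 2158: unsupported opcode */
            r.mlx5_opcode = MLX5_OPC_LAST;
            break;
        }
        return r;
    }
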
2184 struct accel_mlx5_task *mlx5_task = SPDK_CONTAINEROF(task, struct accel_mlx5_task, base);
2185 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
2191 dev->stats.opcodes[mlx5_task->mlx5_opcode]++;
2192 rc = g_accel_mlx5_tasks_ops[mlx5_task->mlx5_opcode].init(mlx5_task);
2195 SPDK_DEBUGLOG(accel_mlx5, "no reqs to handle new task %p (required %u), put to queue\n", mlx5_task,
2196 mlx5_task->num_reqs);
2197 STAILQ_INSERT_TAIL(&dev->nomem, mlx5_task, link);
2204 if (spdk_unlikely(mlx5_task->qp->recovering)) {
2205 STAILQ_INSERT_TAIL(&dev->nomem, mlx5_task, link);
2216 return g_accel_mlx5_tasks_ops[mlx5_task->mlx5_opcode].process(mlx5_task);
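
Lines 2184-2216 show the submit path as a whole: the per-opcode init hook runs first, ENOMEM from init (or a qp that is recovering) parks the task on the device's nomem queue instead of failing it, and otherwise the per-opcode process hook posts the work. A condensed standalone model of that control flow, with stub hooks standing in for the g_accel_mlx5_tasks_ops table and the return-code handling simplified:

    #include <errno.h>

    struct task_model;

    struct task_ops_model {
        int (*init)(struct task_model *t);
        int (*process)(struct task_model *t);
    };

    struct task_model {
        const struct task_ops_model *ops;
        int qp_recovering;   /* stand-in for mlx5_task->qp->recovering */
        int queued_nomem;    /* set instead of inserting into dev->nomem */
    };

    static int
    submit_model(struct task_model *t)
    {
        int rc = t->ops->init(t);               /* line 2192 */

        if (rc == -ENOMEM) {                    /* lines 2195-2197 */
            t->queued_nomem = 1;
            return 0;
        }
        if (rc) {
            return rc;
        }
        if (t->qp_recovering) {                 /* lines 2204-2205 */
            t->queued_nomem = 1;
            return 0;
        }
        return t->ops->process(t);              /* line 2216 */
    }
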
2220 accel_mlx5_task_assign_qp(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_io_channel *accel_ch)
2230 mlx5_task->qp = &dev->qp;
2234 accel_mlx5_task_reset(struct accel_mlx5_task *mlx5_task)
2236 mlx5_task->num_completed_reqs = 0;
2237 mlx5_task->num_submitted_reqs = 0;
2238 mlx5_task->num_ops = 0;
2239 mlx5_task->num_processed_blocks = 0;
2240 mlx5_task->raw = 0;
2246 struct accel_mlx5_task *mlx5_task = SPDK_CONTAINEROF(task, struct accel_mlx5_task, base);
2249 accel_mlx5_task_assign_qp(mlx5_task, accel_ch);
2250 accel_mlx5_task_reset(mlx5_task);
2251 accel_mlx5_task_init_opcode(mlx5_task);
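
Lines 2246-2251, together with the reset fields at 2236-2240, show the fixed preamble every newly submitted task goes through: bind to the channel's qp, clear the progress counters, then translate the opcode. A small model of the reset step over the counters visible above; the field widths are assumptions for illustration:

    #include <stdint.h>

    struct task_progress_model {
        uint16_t num_completed_reqs;
        uint16_t num_submitted_reqs;
        uint16_t num_ops;
        uint16_t num_processed_blocks;
        uint8_t  raw;
    };

    /* Mirrors the reset on lines 2236-2240: every submission starts from
     * zeroed per-execution progress state before the opcode-specific
     * init/process hooks run. */
    static void
    task_reset_model(struct task_progress_model *t)
    {
        t->num_completed_reqs = 0;
        t->num_submitted_reqs = 0;
        t->num_ops = 0;
        t->num_processed_blocks = 0;
        t->raw = 0;
    }
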
3586 struct accel_mlx5_task *mlx5_task;
3595 mlx5_task = SPDK_CONTAINEROF(task, struct accel_mlx5_task, base);
3596 mlx5_task->driver_seq = 1;