Lines Matching defs:qp

35 #define ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, task)	\
37 assert((qp)->wrs_submitted < (qp)->wrs_max); \
38 (qp)->wrs_submitted++; \
39 (qp)->ring_db = true; \
44 #define ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, task) \
48 assert((qp)->wrs_submitted < (qp)->wrs_max); \
49 (qp)->wrs_submitted++; \
50 (qp)->ring_db = true; \
141 struct accel_mlx5_qp *qp;
147 uint16_t num_wrs; /* Number of outstanding operations that consume a qp slot */
178 struct spdk_mlx5_qp *qp;
192 struct accel_mlx5_qp qp;
322 struct accel_mlx5_dev *dev = task->qp->dev;
332 struct accel_mlx5_dev *dev = task->qp->dev;
361 struct spdk_io_channel *ch = task->qp->dev->ch;
384 local_ctx.rdma.ibv_qp = dev->qp.verbs_qp;
451 accel_mlx5_dev_get_available_slots(struct accel_mlx5_dev *dev, struct accel_mlx5_qp *qp)
453 assert(qp->wrs_max >= qp->wrs_submitted);
461 return qp->wrs_max - qp->wrs_submitted;
510 struct accel_mlx5_qp *qp = mlx5_task->qp;
511 struct accel_mlx5_dev *dev = qp->dev;
644 rc = spdk_mlx5_umr_configure_crypto(qp->qp, &umr_attr, &cattr, wr_id, flags);
654 struct accel_mlx5_qp *qp = mlx5_task->qp;
655 struct accel_mlx5_dev *dev = qp->dev;
661 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
694 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
702 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges[i].src_sge, sges[i].src_sge_count, 0,
705 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges[i].dst_sge, sges[i].dst_sge_count, 0,
717 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
722 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges[i].src_sge, sges[i].src_sge_count, 0,
725 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges[i].dst_sge, sges[i].dst_sge_count, 0,
736 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
738 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
761 struct accel_mlx5_qp *qp = task->qp;
762 struct accel_mlx5_dev *dev = qp->dev;
763 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
779 task->qp->dev->stats.nomem_qdepth++;
790 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
860 if (spdk_unlikely(accel_mlx5_dev_get_available_slots(dev, &dev->qp) < 2)) {
862 SPDK_DEBUGLOG(accel_mlx5, "dev %s qp %p is full\n", dev->dev_ctx->context->device->name,
863 mlx5_task->qp);
882 accel_mlx5_copy_task_process_one(struct accel_mlx5_task *mlx5_task, struct accel_mlx5_qp *qp,
895 rc = accel_mlx5_fill_block_sge(qp->dev, sge.src_sge, &mlx5_task->src, dst_len, &remaining,
908 rc = accel_mlx5_fill_block_sge(qp->dev, sge.dst_sge, &mlx5_task->dst, dst_len, &remaining,
924 rc = spdk_mlx5_qp_rdma_write(mlx5_task->qp->qp, sge.src_sge, sge.src_sge_count,
930 qp->dev->stats.rdma_writes++;
939 struct accel_mlx5_qp *qp = mlx5_task->qp;
940 struct accel_mlx5_dev *dev = qp->dev;
950 rc = accel_mlx5_copy_task_process_one(mlx5_task, qp, 0, 0);
954 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
958 rc = accel_mlx5_copy_task_process_one(mlx5_task, qp, (uint64_t)mlx5_task,
963 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
965 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
975 struct accel_mlx5_qp *qp = task->qp;
976 struct accel_mlx5_dev *dev = qp->dev;
977 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1038 struct accel_mlx5_qp *qp = mlx5_task->qp;
1039 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(qp->dev, qp);
1054 qp->dev->stats.nomem_qdepth++;
1093 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
1125 return spdk_mlx5_umr_configure_sig(mlx5_task->qp->qp, &umr_attr, &sattr, 0, 0);
1132 struct accel_mlx5_qp *qp = mlx5_task->qp;
1133 struct accel_mlx5_dev *dev = qp->dev;
1170 struct accel_mlx5_qp *qp = mlx5_task->qp;
1171 struct accel_mlx5_dev *dev = qp->dev;
1174 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1186 /* At this moment we have as many requests as can be submitted to a qp */
1197 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1218 rc = spdk_mlx5_qp_set_psv(qp->qp, mlx5_task->psv->psv_index, *mlx5_task->base.crc_dst, 0, 0);
1223 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1226 rc = spdk_mlx5_qp_rdma_read(qp->qp, sge, sge_count, 0, mlx5_task->mkeys[0]->mkey,
1233 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1240 accel_mlx5_crc_task_fill_umr_sge(struct accel_mlx5_qp *qp, struct ibv_sge *sge,
1301 rc = accel_mlx5_translate_addr(sge_addr, sge_len, domain, domain_ctx, qp->dev, &sge[umr_idx]);
1323 struct accel_mlx5_qp *qp = mlx5_task->qp;
1324 struct accel_mlx5_dev *dev = qp->dev;
1336 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1374 sge_count = accel_mlx5_crc_task_fill_umr_sge(qp, sges, umr_sgl_ptr, task->src_domain,
1399 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1404 rc = spdk_mlx5_qp_set_psv(qp->qp, mlx5_task->psv->psv_index, *mlx5_task->base.crc_dst, 0, 0);
1409 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1425 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges, sge_count, 0, mlx5_task->mkeys[i]->mkey,
1432 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED(qp, mlx5_task);
1470 rc = spdk_mlx5_qp_rdma_read(qp->qp, sges, sge_count, umr_offset,
1478 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1501 STAILQ_INSERT_TAIL(&mlx5_task->qp->in_hw, mlx5_task, link);
1513 struct accel_mlx5_qp *qp = task->qp;
1514 struct accel_mlx5_dev *dev = qp->dev;
1549 struct accel_mlx5_qp *qp = task->qp;
1550 struct accel_mlx5_dev *dev = qp->dev;
1551 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1671 struct accel_mlx5_qp *qp = mlx5_task->qp;
1672 uint32_t qp_slot = accel_mlx5_dev_get_available_slots(qp->dev, qp);
1692 SPDK_DEBUGLOG(accel_mlx5, "dev %s qp %p is full\n", qp->dev->dev_ctx->context->device->name,
1693 mlx5_task->qp);
1694 qp->dev->stats.nomem_qdepth++;
1704 struct accel_mlx5_qp *qp = mlx5_task->qp;
1705 struct accel_mlx5_dev *dev = qp->dev;
1708 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1780 struct accel_mlx5_qp *qp = mlx5_task->qp;
1781 struct accel_mlx5_dev *dev = qp->dev;
1804 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1805 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
1815 struct accel_mlx5_qp *qp = task->qp;
1816 struct accel_mlx5_dev *dev = qp->dev;
1818 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1840 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
1854 struct accel_mlx5_qp *qp = mlx5_task->qp;
1855 struct accel_mlx5_dev *dev = qp->dev;
1857 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1901 struct accel_mlx5_qp *qp = mlx5_task->qp;
1902 struct accel_mlx5_dev *dev = qp->dev;
1925 rc = spdk_mlx5_umr_configure(qp->qp, &umr_attr, (uint64_t)mlx5_task,
1933 ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED_SIGNALED(dev, qp, mlx5_task);
1934 STAILQ_INSERT_TAIL(&qp->in_hw, mlx5_task, link);
1944 struct accel_mlx5_qp *qp = task->qp;
1945 struct accel_mlx5_dev *dev = qp->dev;
1947 uint16_t qp_slot = accel_mlx5_dev_get_available_slots(dev, qp);
1969 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
2061 struct accel_mlx5_dev *dev = task->qp->dev;
2105 struct spdk_io_channel *ch = task->qp->dev->ch;
2119 struct accel_mlx5_qp *qp = task->qp;
2120 struct accel_mlx5_dev *dev = qp->dev;
2122 if (spdk_unlikely(qp->recovering)) {
2172 if (dev->qp.ring_db) {
2173 spdk_mlx5_qp_complete_send(dev->qp.qp);
2174 dev->qp.ring_db = false;
2185 struct accel_mlx5_dev *dev = mlx5_task->qp->dev;
2204 if (spdk_unlikely(mlx5_task->qp->recovering)) {
2230 mlx5_task->qp = &dev->qp;
2256 static void accel_mlx5_recover_qp(struct accel_mlx5_qp *qp);
2261 struct accel_mlx5_qp *qp = arg;
2263 spdk_poller_unregister(&qp->recover_poller);
2264 accel_mlx5_recover_qp(qp);
2269 accel_mlx5_recover_qp(struct accel_mlx5_qp *qp)
2271 struct accel_mlx5_dev *dev = qp->dev;
2275 SPDK_NOTICELOG("Recovering qp %p, core %u\n", qp, spdk_env_get_current_core());
2276 if (qp->qp) {
2277 spdk_mlx5_qp_destroy(qp->qp);
2278 qp->qp = NULL;
2286 rc = spdk_mlx5_qp_create(dev->dev_ctx->pd, dev->cq, &mlx5_qp_attr, &qp->qp);
2290 qp->recover_poller = SPDK_POLLER_REGISTER(accel_mlx5_recover_qp_poller, qp,
2295 qp->recovering = false;
2301 struct accel_mlx5_qp *qp = task->qp;
2304 SPDK_WARNLOG("RDMA: qp %p, task %p, WC status %d, core %u\n",
2305 qp, task, wc->status, spdk_env_get_current_core());
2308 "RDMA: qp %p, task %p, WC status %d, core %u\n",
2309 qp, task, wc->status, spdk_env_get_current_core());
2312 qp->recovering = true;
2315 STAILQ_REMOVE_HEAD(&qp->in_hw, link);
2325 struct accel_mlx5_qp *qp;
2349 qp = task->qp;
2350 assert(task == STAILQ_FIRST(&qp->in_hw) && "submission mismatch");
2355 assert(qp->wrs_submitted >= task->num_wrs);
2356 qp->wrs_submitted -= task->num_wrs;
2362 if (qp->wrs_submitted == 0) {
2363 assert(STAILQ_EMPTY(&qp->in_hw));
2364 accel_mlx5_recover_qp(qp);
2372 STAILQ_REMOVE_HEAD(&qp->in_hw, link);
2377 STAILQ_REMOVE_HEAD(&qp->in_hw, link);
2433 if (dev->qp.ring_db) {
2434 spdk_mlx5_qp_complete_send(dev->qp.qp);
2435 dev->qp.ring_db = false;
2474 accel_mlx5_create_qp(struct accel_mlx5_dev *dev, struct accel_mlx5_qp *qp)
2484 rc = spdk_mlx5_qp_create(dev->dev_ctx->pd, dev->cq, &mlx5_qp_attr, &qp->qp);
2489 STAILQ_INIT(&qp->in_hw);
2490 qp->dev = dev;
2491 qp->verbs_qp = spdk_mlx5_qp_get_verbs_qp(qp->qp);
2492 assert(qp->verbs_qp);
2493 qp->wrs_max = g_accel_mlx5.attr.qp_size;
2528 spdk_mlx5_qp_destroy(dev->qp.qp);
2532 spdk_poller_unregister(&dev->qp.recover_poller);
2614 rc = accel_mlx5_create_qp(dev, &dev->qp);
3487 task->qp = &dev->qp;
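
Taken together, the matches above outline the module's qp bookkeeping: the submit paths check accel_mlx5_dev_get_available_slots() before posting, the ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED* macros bump wrs_submitted and set ring_db, the poller rings the doorbell once per batch via spdk_mlx5_qp_complete_send() and releases slots as tasks complete, and a recovering flag plus accel_mlx5_recover_qp() cover the error path. The following is a minimal standalone sketch of that slot-accounting and doorbell-batching pattern, under simplified types; all demo_* names are hypothetical and this is not the SPDK implementation (the recovery path is omitted).

/*
 * Illustrative sketch only: a qp with a bounded send queue, per-WR accounting,
 * and a single doorbell ring per poll cycle. Names are hypothetical.
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_qp {
	uint16_t wrs_max;        /* send-queue capacity */
	uint16_t wrs_submitted;  /* WRs posted but not yet completed */
	bool     ring_db;        /* doorbell pending for the current batch */
};

/* Analogue of accel_mlx5_dev_get_available_slots(): free WR slots left. */
static uint16_t
demo_available_slots(const struct demo_qp *qp)
{
	assert(qp->wrs_max >= qp->wrs_submitted);
	return qp->wrs_max - qp->wrs_submitted;
}

/* Analogue of ACCEL_MLX5_UPDATE_ON_WR_SUBMITTED: account for one posted WR. */
static void
demo_on_wr_submitted(struct demo_qp *qp)
{
	assert(qp->wrs_submitted < qp->wrs_max);
	qp->wrs_submitted++;
	qp->ring_db = true;
}

/* Submit path: defer the task when it cannot fit, otherwise post every WR. */
static int
demo_submit_task(struct demo_qp *qp, uint16_t num_wrs)
{
	uint16_t i;

	if (demo_available_slots(qp) < num_wrs) {
		/* The real module counts this as stats.nomem_qdepth and retries later. */
		return -ENOMEM;
	}
	for (i = 0; i < num_wrs; i++) {
		/* ... post one RDMA/UMR work request here ... */
		demo_on_wr_submitted(qp);
	}
	return 0;
}

/* Poller tail: ring the doorbell once per batch, then release completed slots. */
static void
demo_poll(struct demo_qp *qp, uint16_t completed_wrs)
{
	if (qp->ring_db) {
		/* corresponds to spdk_mlx5_qp_complete_send(dev->qp.qp) */
		qp->ring_db = false;
	}
	assert(qp->wrs_submitted >= completed_wrs);
	qp->wrs_submitted -= completed_wrs;
}

int
main(void)
{
	struct demo_qp qp = { .wrs_max = 4 };

	printf("submit 3 WRs: %d\n", demo_submit_task(&qp, 3)); /* 0 */
	printf("submit 2 WRs: %d\n", demo_submit_task(&qp, 2)); /* -ENOMEM, only 1 slot free */
	demo_poll(&qp, 3);
	printf("slots after poll: %u\n", demo_available_slots(&qp)); /* 4 */
	return 0;
}

Deferring the doorbell to the poll cycle amortizes the MMIO write across a burst of submissions, and refusing a task that does not fit (the nomem_qdepth case in the listing) keeps the send queue from overflowing while letting the caller resubmit once completions free up slots.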