1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 */ 4 5 #include <stddef.h> 6 #include <errno.h> 7 #include <stdbool.h> 8 #include <string.h> 9 #include <stdint.h> 10 #include <sys/queue.h> 11 12 #include <rte_malloc.h> 13 #include <rte_common.h> 14 #include <rte_eal_paging.h> 15 16 #include <mlx5_glue.h> 17 #include <mlx5_devx_cmds.h> 18 #include <mlx5_malloc.h> 19 20 #include "mlx5.h" 21 #include "mlx5_common_os.h" 22 #include "mlx5_rxtx.h" 23 #include "mlx5_utils.h" 24 #include "mlx5_devx.h" 25 #include "mlx5_flow.h" 26 27 28 /** 29 * Modify RQ vlan stripping offload 30 * 31 * @param rxq_obj 32 * Rx queue object. 33 * 34 * @return 35 * 0 on success, non-0 otherwise 36 */ 37 static int 38 mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on) 39 { 40 struct mlx5_devx_modify_rq_attr rq_attr; 41 42 memset(&rq_attr, 0, sizeof(rq_attr)); 43 rq_attr.rq_state = MLX5_RQC_STATE_RDY; 44 rq_attr.state = MLX5_RQC_STATE_RDY; 45 rq_attr.vsd = (on ? 0 : 1); 46 rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD; 47 return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); 48 } 49 50 /** 51 * Modify RQ using DevX API. 52 * 53 * @param rxq_obj 54 * DevX Rx queue object. 55 * @param type 56 * Type of change queue state. 57 * 58 * @return 59 * 0 on success, a negative errno value otherwise and rte_errno is set. 60 */ 61 static int 62 mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type) 63 { 64 struct mlx5_devx_modify_rq_attr rq_attr; 65 66 memset(&rq_attr, 0, sizeof(rq_attr)); 67 switch (type) { 68 case MLX5_RXQ_MOD_ERR2RST: 69 rq_attr.rq_state = MLX5_RQC_STATE_ERR; 70 rq_attr.state = MLX5_RQC_STATE_RST; 71 break; 72 case MLX5_RXQ_MOD_RST2RDY: 73 rq_attr.rq_state = MLX5_RQC_STATE_RST; 74 rq_attr.state = MLX5_RQC_STATE_RDY; 75 break; 76 case MLX5_RXQ_MOD_RDY2ERR: 77 rq_attr.rq_state = MLX5_RQC_STATE_RDY; 78 rq_attr.state = MLX5_RQC_STATE_ERR; 79 break; 80 case MLX5_RXQ_MOD_RDY2RST: 81 rq_attr.rq_state = MLX5_RQC_STATE_RDY; 82 rq_attr.state = MLX5_RQC_STATE_RST; 83 break; 84 default: 85 break; 86 } 87 return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); 88 } 89 90 /** 91 * Modify SQ using DevX API. 92 * 93 * @param txq_obj 94 * DevX Tx queue object. 95 * @param type 96 * Type of change queue state. 97 * @param dev_port 98 * Unnecessary. 99 * 100 * @return 101 * 0 on success, a negative errno value otherwise and rte_errno is set. 102 */ 103 static int 104 mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type, 105 uint8_t dev_port) 106 { 107 struct mlx5_devx_modify_sq_attr msq_attr = { 0 }; 108 int ret; 109 110 if (type != MLX5_TXQ_MOD_RST2RDY) { 111 /* Change queue state to reset. */ 112 if (type == MLX5_TXQ_MOD_ERR2RDY) 113 msq_attr.sq_state = MLX5_SQC_STATE_ERR; 114 else 115 msq_attr.sq_state = MLX5_SQC_STATE_RDY; 116 msq_attr.state = MLX5_SQC_STATE_RST; 117 ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr); 118 if (ret) { 119 DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET" 120 " %s", strerror(errno)); 121 rte_errno = errno; 122 return ret; 123 } 124 } 125 if (type != MLX5_TXQ_MOD_RDY2RST) { 126 /* Change queue state to ready. */ 127 msq_attr.sq_state = MLX5_SQC_STATE_RST; 128 msq_attr.state = MLX5_SQC_STATE_RDY; 129 ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr); 130 if (ret) { 131 DRV_LOG(ERR, "Cannot change the Tx SQ state to READY" 132 " %s", strerror(errno)); 133 rte_errno = errno; 134 return ret; 135 } 136 } 137 /* 138 * The dev_port variable is relevant only in Verbs API, and there is a 139 * pointer that points to this function and a parallel function in verbs 140 * intermittently, so they should have the same parameters. 141 */ 142 (void)dev_port; 143 return 0; 144 } 145 146 /** 147 * Release the resources allocated for an RQ DevX object. 148 * 149 * @param rxq_ctrl 150 * DevX Rx queue object. 151 */ 152 static void 153 mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) 154 { 155 struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page; 156 157 if (rxq_ctrl->rxq.wqes) { 158 mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes); 159 rxq_ctrl->rxq.wqes = NULL; 160 } 161 if (rxq_ctrl->wq_umem) { 162 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem); 163 rxq_ctrl->wq_umem = NULL; 164 } 165 if (dbr_page) { 166 claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs, 167 mlx5_os_get_umem_id(dbr_page->umem), 168 rxq_ctrl->rq_dbr_offset)); 169 rxq_ctrl->rq_dbrec_page = NULL; 170 } 171 } 172 173 /** 174 * Release the resources allocated for the Rx CQ DevX object. 175 * 176 * @param rxq_ctrl 177 * DevX Rx queue object. 178 */ 179 static void 180 mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) 181 { 182 struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page; 183 184 if (rxq_ctrl->rxq.cqes) { 185 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes); 186 rxq_ctrl->rxq.cqes = NULL; 187 } 188 if (rxq_ctrl->cq_umem) { 189 mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem); 190 rxq_ctrl->cq_umem = NULL; 191 } 192 if (dbr_page) { 193 claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs, 194 mlx5_os_get_umem_id(dbr_page->umem), 195 rxq_ctrl->cq_dbr_offset)); 196 rxq_ctrl->cq_dbrec_page = NULL; 197 } 198 } 199 200 /** 201 * Release an Rx DevX queue object. 202 * 203 * @param rxq_obj 204 * DevX Rx queue object. 205 */ 206 static void 207 mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj) 208 { 209 MLX5_ASSERT(rxq_obj); 210 MLX5_ASSERT(rxq_obj->rq); 211 if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) { 212 mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST); 213 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); 214 } else { 215 MLX5_ASSERT(rxq_obj->devx_cq); 216 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); 217 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq)); 218 if (rxq_obj->devx_channel) 219 mlx5_glue->devx_destroy_event_channel 220 (rxq_obj->devx_channel); 221 mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl); 222 mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl); 223 } 224 } 225 226 /** 227 * Get event for an Rx DevX queue object. 228 * 229 * @param rxq_obj 230 * DevX Rx queue object. 231 * 232 * @return 233 * 0 on success, a negative errno value otherwise and rte_errno is set. 234 */ 235 static int 236 mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj) 237 { 238 #ifdef HAVE_IBV_DEVX_EVENT 239 union { 240 struct mlx5dv_devx_async_event_hdr event_resp; 241 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128]; 242 } out; 243 int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel, 244 &out.event_resp, 245 sizeof(out.buf)); 246 247 if (ret < 0) { 248 rte_errno = errno; 249 return -rte_errno; 250 } 251 if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) { 252 rte_errno = EINVAL; 253 return -rte_errno; 254 } 255 return 0; 256 #else 257 (void)rxq_obj; 258 rte_errno = ENOTSUP; 259 return -rte_errno; 260 #endif /* HAVE_IBV_DEVX_EVENT */ 261 } 262 263 /** 264 * Fill common fields of create RQ attributes structure. 265 * 266 * @param rxq_data 267 * Pointer to Rx queue data. 268 * @param cqn 269 * CQ number to use with this RQ. 270 * @param rq_attr 271 * RQ attributes structure to fill.. 272 */ 273 static void 274 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn, 275 struct mlx5_devx_create_rq_attr *rq_attr) 276 { 277 rq_attr->state = MLX5_RQC_STATE_RST; 278 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1; 279 rq_attr->cqn = cqn; 280 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0; 281 } 282 283 /** 284 * Fill common fields of DevX WQ attributes structure. 285 * 286 * @param priv 287 * Pointer to device private data. 288 * @param rxq_ctrl 289 * Pointer to Rx queue control structure. 290 * @param wq_attr 291 * WQ attributes structure to fill.. 292 */ 293 static void 294 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl, 295 struct mlx5_devx_wq_attr *wq_attr) 296 { 297 wq_attr->end_padding_mode = priv->config.cqe_pad ? 298 MLX5_WQ_END_PAD_MODE_ALIGN : 299 MLX5_WQ_END_PAD_MODE_NONE; 300 wq_attr->pd = priv->sh->pdn; 301 wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset; 302 wq_attr->dbr_umem_id = 303 mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem); 304 wq_attr->dbr_umem_valid = 1; 305 wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem); 306 wq_attr->wq_umem_valid = 1; 307 } 308 309 /** 310 * Create a RQ object using DevX. 311 * 312 * @param dev 313 * Pointer to Ethernet device. 314 * @param idx 315 * Queue index in DPDK Rx queue array. 316 * 317 * @return 318 * The DevX RQ object initialized, NULL otherwise and rte_errno is set. 319 */ 320 static struct mlx5_devx_obj * 321 mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx) 322 { 323 struct mlx5_priv *priv = dev->data->dev_private; 324 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 325 struct mlx5_rxq_ctrl *rxq_ctrl = 326 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 327 struct mlx5_devx_create_rq_attr rq_attr = { 0 }; 328 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n); 329 uint32_t cqn = rxq_ctrl->obj->devx_cq->id; 330 struct mlx5_devx_dbr_page *dbr_page; 331 int64_t dbr_offset; 332 uint32_t wq_size = 0; 333 uint32_t wqe_size = 0; 334 uint32_t log_wqe_size = 0; 335 void *buf = NULL; 336 struct mlx5_devx_obj *rq; 337 338 /* Fill RQ attributes. */ 339 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE; 340 rq_attr.flush_in_error_en = 1; 341 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr); 342 /* Fill WQ attributes for this RQ. */ 343 if (mlx5_rxq_mprq_enabled(rxq_data)) { 344 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ; 345 /* 346 * Number of strides in each WQE: 347 * 512*2^single_wqe_log_num_of_strides. 348 */ 349 rq_attr.wq_attr.single_wqe_log_num_of_strides = 350 rxq_data->strd_num_n - 351 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; 352 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */ 353 rq_attr.wq_attr.single_stride_log_num_of_bytes = 354 rxq_data->strd_sz_n - 355 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; 356 wqe_size = sizeof(struct mlx5_wqe_mprq); 357 } else { 358 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 359 wqe_size = sizeof(struct mlx5_wqe_data_seg); 360 } 361 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n; 362 rq_attr.wq_attr.log_wq_stride = log_wqe_size; 363 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n; 364 /* Calculate and allocate WQ memory space. */ 365 wqe_size = 1 << log_wqe_size; /* round up power of two.*/ 366 wq_size = wqe_n * wqe_size; 367 size_t alignment = MLX5_WQE_BUF_ALIGNMENT; 368 if (alignment == (size_t)-1) { 369 DRV_LOG(ERR, "Failed to get mem page size"); 370 rte_errno = ENOMEM; 371 return NULL; 372 } 373 buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size, 374 alignment, rxq_ctrl->socket); 375 if (!buf) 376 return NULL; 377 rxq_data->wqes = buf; 378 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, 379 buf, wq_size, 0); 380 if (!rxq_ctrl->wq_umem) 381 goto error; 382 /* Allocate RQ door-bell. */ 383 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page); 384 if (dbr_offset < 0) { 385 DRV_LOG(ERR, "Failed to allocate RQ door-bell."); 386 goto error; 387 } 388 rxq_ctrl->rq_dbr_offset = dbr_offset; 389 rxq_ctrl->rq_dbrec_page = dbr_page; 390 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + 391 (uintptr_t)rxq_ctrl->rq_dbr_offset); 392 /* Create RQ using DevX API. */ 393 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr); 394 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket); 395 if (!rq) 396 goto error; 397 return rq; 398 error: 399 mlx5_rxq_release_devx_rq_resources(rxq_ctrl); 400 return NULL; 401 } 402 403 /** 404 * Create a DevX CQ object for an Rx queue. 405 * 406 * @param dev 407 * Pointer to Ethernet device. 408 * @param idx 409 * Queue index in DPDK Rx queue array. 410 * 411 * @return 412 * The DevX CQ object initialized, NULL otherwise and rte_errno is set. 413 */ 414 static struct mlx5_devx_obj * 415 mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) 416 { 417 struct mlx5_devx_obj *cq_obj = 0; 418 struct mlx5_devx_cq_attr cq_attr = { 0 }; 419 struct mlx5_priv *priv = dev->data->dev_private; 420 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 421 struct mlx5_rxq_ctrl *rxq_ctrl = 422 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 423 size_t page_size = rte_mem_page_size(); 424 unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data); 425 struct mlx5_devx_dbr_page *dbr_page; 426 int64_t dbr_offset; 427 void *buf = NULL; 428 uint16_t event_nums[1] = {0}; 429 uint32_t log_cqe_n; 430 uint32_t cq_size; 431 int ret = 0; 432 433 if (page_size == (size_t)-1) { 434 DRV_LOG(ERR, "Failed to get page_size."); 435 goto error; 436 } 437 if (priv->config.cqe_comp && !rxq_data->hw_timestamp && 438 !rxq_data->lro) { 439 cq_attr.cqe_comp_en = 1u; 440 /* 441 * Select CSUM miniCQE format only for non-vectorized MPRQ 442 * Rx burst, use HASH miniCQE format for everything else. 443 */ 444 if (mlx5_rxq_check_vec_support(rxq_data) < 0 && 445 mlx5_rxq_mprq_enabled(rxq_data)) 446 cq_attr.mini_cqe_res_format = 447 MLX5_CQE_RESP_FORMAT_CSUM_STRIDX; 448 else 449 cq_attr.mini_cqe_res_format = 450 MLX5_CQE_RESP_FORMAT_HASH; 451 /* 452 * For vectorized Rx, it must not be doubled in order to 453 * make cq_ci and rq_ci aligned. 454 */ 455 if (mlx5_rxq_check_vec_support(rxq_data) < 0) 456 cqe_n *= 2; 457 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { 458 DRV_LOG(DEBUG, 459 "Port %u Rx CQE compression is disabled for HW" 460 " timestamp.", 461 dev->data->port_id); 462 } else if (priv->config.cqe_comp && rxq_data->lro) { 463 DRV_LOG(DEBUG, 464 "Port %u Rx CQE compression is disabled for LRO.", 465 dev->data->port_id); 466 } 467 if (priv->config.cqe_pad) 468 cq_attr.cqe_size = MLX5_CQE_SIZE_128B; 469 log_cqe_n = log2above(cqe_n); 470 cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n); 471 buf = rte_calloc_socket(__func__, 1, cq_size, page_size, 472 rxq_ctrl->socket); 473 if (!buf) { 474 DRV_LOG(ERR, "Failed to allocate memory for CQ."); 475 goto error; 476 } 477 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf; 478 rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf, 479 cq_size, 480 IBV_ACCESS_LOCAL_WRITE); 481 if (!rxq_ctrl->cq_umem) { 482 DRV_LOG(ERR, "Failed to register umem for CQ."); 483 goto error; 484 } 485 /* Allocate CQ door-bell. */ 486 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page); 487 if (dbr_offset < 0) { 488 DRV_LOG(ERR, "Failed to allocate CQ door-bell."); 489 goto error; 490 } 491 rxq_ctrl->cq_dbr_offset = dbr_offset; 492 rxq_ctrl->cq_dbrec_page = dbr_page; 493 rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + 494 (uintptr_t)rxq_ctrl->cq_dbr_offset); 495 rxq_data->cq_uar = 496 mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar); 497 /* Create CQ using DevX API. */ 498 cq_attr.eqn = priv->sh->eqn; 499 cq_attr.uar_page_id = 500 mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar); 501 cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem); 502 cq_attr.q_umem_valid = 1; 503 cq_attr.log_cq_size = log_cqe_n; 504 cq_attr.log_page_size = rte_log2_u32(page_size); 505 cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset; 506 cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem); 507 cq_attr.db_umem_valid = 1; 508 cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr); 509 if (!cq_obj) 510 goto error; 511 rxq_data->cqe_n = log_cqe_n; 512 rxq_data->cqn = cq_obj->id; 513 if (rxq_ctrl->obj->devx_channel) { 514 ret = mlx5_glue->devx_subscribe_devx_event 515 (rxq_ctrl->obj->devx_channel, 516 cq_obj->obj, 517 sizeof(event_nums), 518 event_nums, 519 (uint64_t)(uintptr_t)cq_obj); 520 if (ret) { 521 DRV_LOG(ERR, "Fail to subscribe CQ to event channel."); 522 rte_errno = errno; 523 goto error; 524 } 525 } 526 /* Initialise CQ to 1's to mark HW ownership for all CQEs. */ 527 memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size); 528 return cq_obj; 529 error: 530 if (cq_obj) 531 mlx5_devx_cmd_destroy(cq_obj); 532 mlx5_rxq_release_devx_cq_resources(rxq_ctrl); 533 return NULL; 534 } 535 536 /** 537 * Create the Rx hairpin queue object. 538 * 539 * @param dev 540 * Pointer to Ethernet device. 541 * @param idx 542 * Queue index in DPDK Rx queue array. 543 * 544 * @return 545 * 0 on success, a negative errno value otherwise and rte_errno is set. 546 */ 547 static int 548 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) 549 { 550 struct mlx5_priv *priv = dev->data->dev_private; 551 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 552 struct mlx5_rxq_ctrl *rxq_ctrl = 553 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 554 struct mlx5_devx_create_rq_attr attr = { 0 }; 555 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 556 uint32_t max_wq_data; 557 558 MLX5_ASSERT(rxq_data); 559 MLX5_ASSERT(tmpl); 560 tmpl->rxq_ctrl = rxq_ctrl; 561 attr.hairpin = 1; 562 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; 563 /* Jumbo frames > 9KB should be supported, and more packets. */ 564 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 565 if (priv->config.log_hp_size > max_wq_data) { 566 DRV_LOG(ERR, "Total data size %u power of 2 is " 567 "too large for hairpin.", 568 priv->config.log_hp_size); 569 rte_errno = ERANGE; 570 return -rte_errno; 571 } 572 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 573 } else { 574 attr.wq_attr.log_hairpin_data_sz = 575 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 576 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 577 } 578 /* Set the packets number to the maximum value for performance. */ 579 attr.wq_attr.log_hairpin_num_packets = 580 attr.wq_attr.log_hairpin_data_sz - 581 MLX5_HAIRPIN_QUEUE_STRIDE; 582 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr, 583 rxq_ctrl->socket); 584 if (!tmpl->rq) { 585 DRV_LOG(ERR, 586 "Port %u Rx hairpin queue %u can't create rq object.", 587 dev->data->port_id, idx); 588 rte_errno = errno; 589 return -rte_errno; 590 } 591 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN; 592 return 0; 593 } 594 595 /** 596 * Create the Rx queue DevX object. 597 * 598 * @param dev 599 * Pointer to Ethernet device. 600 * @param idx 601 * Queue index in DPDK Rx queue array. 602 * 603 * @return 604 * 0 on success, a negative errno value otherwise and rte_errno is set. 605 */ 606 static int 607 mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) 608 { 609 struct mlx5_priv *priv = dev->data->dev_private; 610 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 611 struct mlx5_rxq_ctrl *rxq_ctrl = 612 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 613 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 614 int ret = 0; 615 616 MLX5_ASSERT(rxq_data); 617 MLX5_ASSERT(tmpl); 618 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) 619 return mlx5_rxq_obj_hairpin_new(dev, idx); 620 tmpl->rxq_ctrl = rxq_ctrl; 621 if (rxq_ctrl->irq) { 622 int devx_ev_flag = 623 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA; 624 625 tmpl->devx_channel = mlx5_glue->devx_create_event_channel 626 (priv->sh->ctx, 627 devx_ev_flag); 628 if (!tmpl->devx_channel) { 629 rte_errno = errno; 630 DRV_LOG(ERR, "Failed to create event channel %d.", 631 rte_errno); 632 goto error; 633 } 634 tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel); 635 } 636 /* Create CQ using DevX API. */ 637 tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx); 638 if (!tmpl->devx_cq) { 639 DRV_LOG(ERR, "Failed to create CQ."); 640 goto error; 641 } 642 /* Create RQ using DevX API. */ 643 tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx); 644 if (!tmpl->rq) { 645 DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.", 646 dev->data->port_id, idx); 647 rte_errno = ENOMEM; 648 goto error; 649 } 650 /* Change queue state to ready. */ 651 ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY); 652 if (ret) 653 goto error; 654 rxq_data->cq_arm_sn = 0; 655 mlx5_rxq_initialize(rxq_data); 656 rxq_data->cq_ci = 0; 657 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; 658 rxq_ctrl->wqn = tmpl->rq->id; 659 return 0; 660 error: 661 ret = rte_errno; /* Save rte_errno before cleanup. */ 662 if (tmpl->rq) 663 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq)); 664 if (tmpl->devx_cq) 665 claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq)); 666 if (tmpl->devx_channel) 667 mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel); 668 mlx5_rxq_release_devx_rq_resources(rxq_ctrl); 669 mlx5_rxq_release_devx_cq_resources(rxq_ctrl); 670 rte_errno = ret; /* Restore rte_errno. */ 671 return -rte_errno; 672 } 673 674 /** 675 * Create RQT using DevX API as a filed of indirection table. 676 * 677 * @param dev 678 * Pointer to Ethernet device. 679 * @param log_n 680 * Log of number of queues in the array. 681 * @param ind_tbl 682 * DevX indirection table object. 683 * 684 * @return 685 * 0 on success, a negative errno value otherwise and rte_errno is set. 686 */ 687 static int 688 mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n, 689 struct mlx5_ind_table_obj *ind_tbl) 690 { 691 struct mlx5_priv *priv = dev->data->dev_private; 692 struct mlx5_devx_rqt_attr *rqt_attr = NULL; 693 const unsigned int rqt_n = 1 << log_n; 694 unsigned int i, j; 695 696 MLX5_ASSERT(ind_tbl); 697 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) + 698 rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY); 699 if (!rqt_attr) { 700 DRV_LOG(ERR, "Port %u cannot allocate RQT resources.", 701 dev->data->port_id); 702 rte_errno = ENOMEM; 703 return -rte_errno; 704 } 705 rqt_attr->rqt_max_size = priv->config.ind_table_max_size; 706 rqt_attr->rqt_actual_size = rqt_n; 707 for (i = 0; i != ind_tbl->queues_n; ++i) { 708 struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]]; 709 struct mlx5_rxq_ctrl *rxq_ctrl = 710 container_of(rxq, struct mlx5_rxq_ctrl, rxq); 711 712 rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id; 713 } 714 MLX5_ASSERT(i > 0); 715 for (j = 0; i != rqt_n; ++j, ++i) 716 rqt_attr->rq_list[i] = rqt_attr->rq_list[j]; 717 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr); 718 mlx5_free(rqt_attr); 719 if (!ind_tbl->rqt) { 720 DRV_LOG(ERR, "Port %u cannot create DevX RQT.", 721 dev->data->port_id); 722 rte_errno = errno; 723 return -rte_errno; 724 } 725 return 0; 726 } 727 728 /** 729 * Destroy the DevX RQT object. 730 * 731 * @param ind_table 732 * Indirection table to release. 733 */ 734 static void 735 mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl) 736 { 737 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt)); 738 } 739 740 /** 741 * Create an Rx Hash queue. 742 * 743 * @param dev 744 * Pointer to Ethernet device. 745 * @param hrxq 746 * Pointer to Rx Hash queue. 747 * @param tunnel 748 * Tunnel type. 749 * 750 * @return 751 * 0 on success, a negative errno value otherwise and rte_errno is set. 752 */ 753 static int 754 mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq, 755 int tunnel __rte_unused) 756 { 757 struct mlx5_priv *priv = dev->data->dev_private; 758 struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table; 759 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]]; 760 struct mlx5_rxq_ctrl *rxq_ctrl = 761 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 762 struct mlx5_devx_tir_attr tir_attr; 763 const uint8_t *rss_key = hrxq->rss_key; 764 uint64_t hash_fields = hrxq->hash_fields; 765 bool lro = true; 766 uint32_t i; 767 int err; 768 769 /* Enable TIR LRO only if all the queues were configured for. */ 770 for (i = 0; i < ind_tbl->queues_n; ++i) { 771 if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) { 772 lro = false; 773 break; 774 } 775 } 776 memset(&tir_attr, 0, sizeof(tir_attr)); 777 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; 778 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; 779 tir_attr.tunneled_offload_en = !!tunnel; 780 /* If needed, translate hash_fields bitmap to PRM format. */ 781 if (hash_fields) { 782 struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL; 783 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 784 rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ? 785 &tir_attr.rx_hash_field_selector_inner : 786 &tir_attr.rx_hash_field_selector_outer; 787 #else 788 rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer; 789 #endif 790 /* 1 bit: 0: IPv4, 1: IPv6. */ 791 rx_hash_field_select->l3_prot_type = 792 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH); 793 /* 1 bit: 0: TCP, 1: UDP. */ 794 rx_hash_field_select->l4_prot_type = 795 !!(hash_fields & MLX5_UDP_IBV_RX_HASH); 796 /* Bitmask which sets which fields to use in RX Hash. */ 797 rx_hash_field_select->selected_fields = 798 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) << 799 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) | 800 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) << 801 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP | 802 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) << 803 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT | 804 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) << 805 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT; 806 } 807 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) 808 tir_attr.transport_domain = priv->sh->td->id; 809 else 810 tir_attr.transport_domain = priv->sh->tdn; 811 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN); 812 tir_attr.indirect_table = ind_tbl->rqt->id; 813 if (dev->data->dev_conf.lpbk_mode) 814 tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 815 if (lro) { 816 tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout; 817 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size; 818 tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 819 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; 820 } 821 hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr); 822 if (!hrxq->tir) { 823 DRV_LOG(ERR, "Port %u cannot create DevX TIR.", 824 dev->data->port_id); 825 rte_errno = errno; 826 goto error; 827 } 828 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 829 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir 830 (hrxq->tir->obj); 831 if (!hrxq->action) { 832 rte_errno = errno; 833 goto error; 834 } 835 #endif 836 return 0; 837 error: 838 err = rte_errno; /* Save rte_errno before cleanup. */ 839 if (hrxq->tir) 840 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 841 rte_errno = err; /* Restore rte_errno. */ 842 return -rte_errno; 843 } 844 845 /** 846 * Destroy a DevX TIR object. 847 * 848 * @param hrxq 849 * Hash Rx queue to release its tir. 850 */ 851 static void 852 mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq) 853 { 854 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 855 } 856 857 /** 858 * Create a DevX drop action for Rx Hash queue. 859 * 860 * @param dev 861 * Pointer to Ethernet device. 862 * 863 * @return 864 * 0 on success, a negative errno value otherwise and rte_errno is set. 865 */ 866 static int 867 mlx5_devx_drop_action_create(struct rte_eth_dev *dev) 868 { 869 (void)dev; 870 DRV_LOG(ERR, "DevX drop action is not supported yet."); 871 rte_errno = ENOTSUP; 872 return -rte_errno; 873 } 874 875 /** 876 * Release a drop hash Rx queue. 877 * 878 * @param dev 879 * Pointer to Ethernet device. 880 */ 881 static void 882 mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev) 883 { 884 (void)dev; 885 DRV_LOG(ERR, "DevX drop action is not supported yet."); 886 rte_errno = ENOTSUP; 887 } 888 889 /** 890 * Create the Tx hairpin queue object. 891 * 892 * @param dev 893 * Pointer to Ethernet device. 894 * @param idx 895 * Queue index in DPDK Tx queue array. 896 * 897 * @return 898 * 0 on success, a negative errno value otherwise and rte_errno is set. 899 */ 900 static int 901 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) 902 { 903 struct mlx5_priv *priv = dev->data->dev_private; 904 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 905 struct mlx5_txq_ctrl *txq_ctrl = 906 container_of(txq_data, struct mlx5_txq_ctrl, txq); 907 struct mlx5_devx_create_sq_attr attr = { 0 }; 908 struct mlx5_txq_obj *tmpl = txq_ctrl->obj; 909 uint32_t max_wq_data; 910 911 MLX5_ASSERT(txq_data); 912 MLX5_ASSERT(tmpl); 913 tmpl->txq_ctrl = txq_ctrl; 914 attr.hairpin = 1; 915 attr.tis_lst_sz = 1; 916 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; 917 /* Jumbo frames > 9KB should be supported, and more packets. */ 918 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 919 if (priv->config.log_hp_size > max_wq_data) { 920 DRV_LOG(ERR, "Total data size %u power of 2 is " 921 "too large for hairpin.", 922 priv->config.log_hp_size); 923 rte_errno = ERANGE; 924 return -rte_errno; 925 } 926 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 927 } else { 928 attr.wq_attr.log_hairpin_data_sz = 929 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 930 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 931 } 932 /* Set the packets number to the maximum value for performance. */ 933 attr.wq_attr.log_hairpin_num_packets = 934 attr.wq_attr.log_hairpin_data_sz - 935 MLX5_HAIRPIN_QUEUE_STRIDE; 936 attr.tis_num = priv->sh->tis->id; 937 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr); 938 if (!tmpl->sq) { 939 DRV_LOG(ERR, 940 "Port %u tx hairpin queue %u can't create SQ object.", 941 dev->data->port_id, idx); 942 rte_errno = errno; 943 return -rte_errno; 944 } 945 return 0; 946 } 947 948 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET 949 /** 950 * Release DevX SQ resources. 951 * 952 * @param txq_obj 953 * DevX Tx queue object. 954 */ 955 static void 956 mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj) 957 { 958 if (txq_obj->sq_devx) 959 claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx)); 960 if (txq_obj->sq_umem) 961 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem)); 962 if (txq_obj->sq_buf) 963 mlx5_free(txq_obj->sq_buf); 964 if (txq_obj->sq_dbrec_page) 965 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs, 966 mlx5_os_get_umem_id 967 (txq_obj->sq_dbrec_page->umem), 968 txq_obj->sq_dbrec_offset)); 969 } 970 971 /** 972 * Release DevX Tx CQ resources. 973 * 974 * @param txq_obj 975 * DevX Tx queue object. 976 */ 977 static void 978 mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj) 979 { 980 if (txq_obj->cq_devx) 981 claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx)); 982 if (txq_obj->cq_umem) 983 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem)); 984 if (txq_obj->cq_buf) 985 mlx5_free(txq_obj->cq_buf); 986 if (txq_obj->cq_dbrec_page) 987 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs, 988 mlx5_os_get_umem_id 989 (txq_obj->cq_dbrec_page->umem), 990 txq_obj->cq_dbrec_offset)); 991 } 992 993 /** 994 * Destroy the Tx queue DevX object. 995 * 996 * @param txq_obj 997 * Txq object to destroy. 998 */ 999 static void 1000 mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj) 1001 { 1002 mlx5_txq_release_devx_cq_resources(txq_obj); 1003 mlx5_txq_release_devx_sq_resources(txq_obj); 1004 } 1005 1006 /** 1007 * Create a DevX CQ object and its resources for an Tx queue. 1008 * 1009 * @param dev 1010 * Pointer to Ethernet device. 1011 * @param idx 1012 * Queue index in DPDK Tx queue array. 1013 * 1014 * @return 1015 * Number of CQEs in CQ, 0 otherwise and rte_errno is set. 1016 */ 1017 static uint32_t 1018 mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) 1019 { 1020 struct mlx5_priv *priv = dev->data->dev_private; 1021 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 1022 struct mlx5_txq_ctrl *txq_ctrl = 1023 container_of(txq_data, struct mlx5_txq_ctrl, txq); 1024 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; 1025 struct mlx5_devx_cq_attr cq_attr = { 0 }; 1026 struct mlx5_cqe *cqe; 1027 size_t page_size; 1028 size_t alignment; 1029 uint32_t cqe_n; 1030 uint32_t i; 1031 int ret; 1032 1033 MLX5_ASSERT(txq_data); 1034 MLX5_ASSERT(txq_obj); 1035 page_size = rte_mem_page_size(); 1036 if (page_size == (size_t)-1) { 1037 DRV_LOG(ERR, "Failed to get mem page size."); 1038 rte_errno = ENOMEM; 1039 return 0; 1040 } 1041 /* Allocate memory buffer for CQEs. */ 1042 alignment = MLX5_CQE_BUF_ALIGNMENT; 1043 if (alignment == (size_t)-1) { 1044 DRV_LOG(ERR, "Failed to get CQE buf alignment."); 1045 rte_errno = ENOMEM; 1046 return 0; 1047 } 1048 /* Create the Completion Queue. */ 1049 cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH + 1050 1 + MLX5_TX_COMP_THRESH_INLINE_DIV; 1051 cqe_n = 1UL << log2above(cqe_n); 1052 if (cqe_n > UINT16_MAX) { 1053 DRV_LOG(ERR, 1054 "Port %u Tx queue %u requests to many CQEs %u.", 1055 dev->data->port_id, txq_data->idx, cqe_n); 1056 rte_errno = EINVAL; 1057 return 0; 1058 } 1059 txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1060 cqe_n * sizeof(struct mlx5_cqe), 1061 alignment, 1062 priv->sh->numa_node); 1063 if (!txq_obj->cq_buf) { 1064 DRV_LOG(ERR, 1065 "Port %u Tx queue %u cannot allocate memory (CQ).", 1066 dev->data->port_id, txq_data->idx); 1067 rte_errno = ENOMEM; 1068 return 0; 1069 } 1070 /* Register allocated buffer in user space with DevX. */ 1071 txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, 1072 (void *)txq_obj->cq_buf, 1073 cqe_n * sizeof(struct mlx5_cqe), 1074 IBV_ACCESS_LOCAL_WRITE); 1075 if (!txq_obj->cq_umem) { 1076 rte_errno = errno; 1077 DRV_LOG(ERR, 1078 "Port %u Tx queue %u cannot register memory (CQ).", 1079 dev->data->port_id, txq_data->idx); 1080 goto error; 1081 } 1082 /* Allocate doorbell record for completion queue. */ 1083 txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx, 1084 &priv->dbrpgs, 1085 &txq_obj->cq_dbrec_page); 1086 if (txq_obj->cq_dbrec_offset < 0) { 1087 rte_errno = errno; 1088 DRV_LOG(ERR, "Failed to allocate CQ door-bell."); 1089 goto error; 1090 } 1091 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ? 1092 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B; 1093 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar); 1094 cq_attr.eqn = priv->sh->eqn; 1095 cq_attr.q_umem_valid = 1; 1096 cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size; 1097 cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem); 1098 cq_attr.db_umem_valid = 1; 1099 cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset; 1100 cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem); 1101 cq_attr.log_cq_size = rte_log2_u32(cqe_n); 1102 cq_attr.log_page_size = rte_log2_u32(page_size); 1103 /* Create completion queue object with DevX. */ 1104 txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr); 1105 if (!txq_obj->cq_devx) { 1106 rte_errno = errno; 1107 DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.", 1108 dev->data->port_id, idx); 1109 goto error; 1110 } 1111 /* Initial fill CQ buffer with invalid CQE opcode. */ 1112 cqe = (struct mlx5_cqe *)txq_obj->cq_buf; 1113 for (i = 0; i < cqe_n; i++) { 1114 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK; 1115 ++cqe; 1116 } 1117 return cqe_n; 1118 error: 1119 ret = rte_errno; 1120 mlx5_txq_release_devx_cq_resources(txq_obj); 1121 rte_errno = ret; 1122 return 0; 1123 } 1124 1125 /** 1126 * Create a SQ object and its resources using DevX. 1127 * 1128 * @param dev 1129 * Pointer to Ethernet device. 1130 * @param idx 1131 * Queue index in DPDK Tx queue array. 1132 * 1133 * @return 1134 * Number of WQEs in SQ, 0 otherwise and rte_errno is set. 1135 */ 1136 static uint32_t 1137 mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx) 1138 { 1139 struct mlx5_priv *priv = dev->data->dev_private; 1140 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 1141 struct mlx5_txq_ctrl *txq_ctrl = 1142 container_of(txq_data, struct mlx5_txq_ctrl, txq); 1143 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; 1144 struct mlx5_devx_create_sq_attr sq_attr = { 0 }; 1145 size_t page_size; 1146 uint32_t wqe_n; 1147 int ret; 1148 1149 MLX5_ASSERT(txq_data); 1150 MLX5_ASSERT(txq_obj); 1151 page_size = rte_mem_page_size(); 1152 if (page_size == (size_t)-1) { 1153 DRV_LOG(ERR, "Failed to get mem page size."); 1154 rte_errno = ENOMEM; 1155 return 0; 1156 } 1157 wqe_n = RTE_MIN(1UL << txq_data->elts_n, 1158 (uint32_t)priv->sh->device_attr.max_qp_wr); 1159 txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1160 wqe_n * sizeof(struct mlx5_wqe), 1161 page_size, priv->sh->numa_node); 1162 if (!txq_obj->sq_buf) { 1163 DRV_LOG(ERR, 1164 "Port %u Tx queue %u cannot allocate memory (SQ).", 1165 dev->data->port_id, txq_data->idx); 1166 rte_errno = ENOMEM; 1167 goto error; 1168 } 1169 /* Register allocated buffer in user space with DevX. */ 1170 txq_obj->sq_umem = mlx5_glue->devx_umem_reg 1171 (priv->sh->ctx, 1172 (void *)txq_obj->sq_buf, 1173 wqe_n * sizeof(struct mlx5_wqe), 1174 IBV_ACCESS_LOCAL_WRITE); 1175 if (!txq_obj->sq_umem) { 1176 rte_errno = errno; 1177 DRV_LOG(ERR, 1178 "Port %u Tx queue %u cannot register memory (SQ).", 1179 dev->data->port_id, txq_data->idx); 1180 goto error; 1181 } 1182 /* Allocate doorbell record for send queue. */ 1183 txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx, 1184 &priv->dbrpgs, 1185 &txq_obj->sq_dbrec_page); 1186 if (txq_obj->sq_dbrec_offset < 0) { 1187 rte_errno = errno; 1188 DRV_LOG(ERR, "Failed to allocate SQ door-bell."); 1189 goto error; 1190 } 1191 sq_attr.tis_lst_sz = 1; 1192 sq_attr.tis_num = priv->sh->tis->id; 1193 sq_attr.state = MLX5_SQC_STATE_RST; 1194 sq_attr.cqn = txq_obj->cq_devx->id; 1195 sq_attr.flush_in_error_en = 1; 1196 sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps; 1197 sq_attr.allow_swp = !!priv->config.swp; 1198 sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode; 1199 sq_attr.wq_attr.uar_page = 1200 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar); 1201 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 1202 sq_attr.wq_attr.pd = priv->sh->pdn; 1203 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE); 1204 sq_attr.wq_attr.log_wq_sz = log2above(wqe_n); 1205 sq_attr.wq_attr.dbr_umem_valid = 1; 1206 sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset; 1207 sq_attr.wq_attr.dbr_umem_id = 1208 mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem); 1209 sq_attr.wq_attr.wq_umem_valid = 1; 1210 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem); 1211 sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size; 1212 /* Create Send Queue object with DevX. */ 1213 txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr); 1214 if (!txq_obj->sq_devx) { 1215 rte_errno = errno; 1216 DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.", 1217 dev->data->port_id, idx); 1218 goto error; 1219 } 1220 return wqe_n; 1221 error: 1222 ret = rte_errno; 1223 mlx5_txq_release_devx_sq_resources(txq_obj); 1224 rte_errno = ret; 1225 return 0; 1226 } 1227 #endif 1228 1229 /** 1230 * Create the Tx queue DevX object. 1231 * 1232 * @param dev 1233 * Pointer to Ethernet device. 1234 * @param idx 1235 * Queue index in DPDK Tx queue array. 1236 * 1237 * @return 1238 * 0 on success, a negative errno value otherwise and rte_errno is set. 1239 */ 1240 int 1241 mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) 1242 { 1243 struct mlx5_priv *priv = dev->data->dev_private; 1244 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 1245 struct mlx5_txq_ctrl *txq_ctrl = 1246 container_of(txq_data, struct mlx5_txq_ctrl, txq); 1247 1248 if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) 1249 return mlx5_txq_obj_hairpin_new(dev, idx); 1250 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 1251 DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.", 1252 dev->data->port_id, idx); 1253 rte_errno = ENOMEM; 1254 return -rte_errno; 1255 #else 1256 struct mlx5_dev_ctx_shared *sh = priv->sh; 1257 struct mlx5_txq_obj *txq_obj = txq_ctrl->obj; 1258 void *reg_addr; 1259 uint32_t cqe_n; 1260 uint32_t wqe_n; 1261 int ret = 0; 1262 1263 MLX5_ASSERT(txq_data); 1264 MLX5_ASSERT(txq_obj); 1265 txq_obj->txq_ctrl = txq_ctrl; 1266 txq_obj->dev = dev; 1267 cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx); 1268 if (!cqe_n) { 1269 rte_errno = errno; 1270 goto error; 1271 } 1272 txq_data->cqe_n = log2above(cqe_n); 1273 txq_data->cqe_s = 1 << txq_data->cqe_n; 1274 txq_data->cqe_m = txq_data->cqe_s - 1; 1275 txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf; 1276 txq_data->cq_ci = 0; 1277 txq_data->cq_pi = 0; 1278 txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs + 1279 txq_obj->cq_dbrec_offset); 1280 *txq_data->cq_db = 0; 1281 /* Create Send Queue object with DevX. */ 1282 wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx); 1283 if (!wqe_n) { 1284 rte_errno = errno; 1285 goto error; 1286 } 1287 /* Create the Work Queue. */ 1288 txq_data->wqe_n = log2above(wqe_n); 1289 txq_data->wqe_s = 1 << txq_data->wqe_n; 1290 txq_data->wqe_m = txq_data->wqe_s - 1; 1291 txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf; 1292 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s; 1293 txq_data->wqe_ci = 0; 1294 txq_data->wqe_pi = 0; 1295 txq_data->wqe_comp = 0; 1296 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV; 1297 txq_data->qp_db = (volatile uint32_t *) 1298 (txq_obj->sq_dbrec_page->dbrs + 1299 txq_obj->sq_dbrec_offset + 1300 MLX5_SND_DBR * sizeof(uint32_t)); 1301 *txq_data->qp_db = 0; 1302 txq_data->qp_num_8s = txq_obj->sq_devx->id << 8; 1303 /* Change Send Queue state to Ready-to-Send. */ 1304 ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0); 1305 if (ret) { 1306 rte_errno = errno; 1307 DRV_LOG(ERR, 1308 "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.", 1309 dev->data->port_id, idx); 1310 goto error; 1311 } 1312 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 1313 /* 1314 * If using DevX need to query and store TIS transport domain value. 1315 * This is done once per port. 1316 * Will use this value on Rx, when creating matching TIR. 1317 */ 1318 if (!priv->sh->tdn) 1319 priv->sh->tdn = priv->sh->td->id; 1320 #endif 1321 MLX5_ASSERT(sh->tx_uar); 1322 reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar); 1323 MLX5_ASSERT(reg_addr); 1324 txq_ctrl->bf_reg = reg_addr; 1325 txq_ctrl->uar_mmap_offset = 1326 mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar); 1327 txq_uar_init(txq_ctrl); 1328 return 0; 1329 error: 1330 ret = rte_errno; /* Save rte_errno before cleanup. */ 1331 mlx5_txq_release_devx_resources(txq_obj); 1332 rte_errno = ret; /* Restore rte_errno. */ 1333 return -rte_errno; 1334 #endif 1335 } 1336 1337 /** 1338 * Release an Tx DevX queue object. 1339 * 1340 * @param txq_obj 1341 * DevX Tx queue object. 1342 */ 1343 void 1344 mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj) 1345 { 1346 MLX5_ASSERT(txq_obj); 1347 if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) { 1348 if (txq_obj->tis) 1349 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); 1350 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET 1351 } else { 1352 mlx5_txq_release_devx_resources(txq_obj); 1353 #endif 1354 } 1355 } 1356 1357 struct mlx5_obj_ops devx_obj_ops = { 1358 .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip, 1359 .rxq_obj_new = mlx5_rxq_devx_obj_new, 1360 .rxq_event_get = mlx5_rx_devx_get_event, 1361 .rxq_obj_modify = mlx5_devx_modify_rq, 1362 .rxq_obj_release = mlx5_rxq_devx_obj_release, 1363 .ind_table_new = mlx5_devx_ind_table_new, 1364 .ind_table_destroy = mlx5_devx_ind_table_destroy, 1365 .hrxq_new = mlx5_devx_hrxq_new, 1366 .hrxq_destroy = mlx5_devx_tir_destroy, 1367 .drop_action_create = mlx5_devx_drop_action_create, 1368 .drop_action_destroy = mlx5_devx_drop_action_destroy, 1369 .txq_obj_new = mlx5_txq_devx_obj_new, 1370 .txq_obj_modify = mlx5_devx_modify_sq, 1371 .txq_obj_release = mlx5_txq_devx_obj_release, 1372 }; 1373