1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 */ 4 5 #include <stddef.h> 6 #include <errno.h> 7 #include <stdbool.h> 8 #include <string.h> 9 #include <stdint.h> 10 #include <sys/queue.h> 11 12 #include <rte_malloc.h> 13 #include <rte_common.h> 14 #include <rte_eal_paging.h> 15 16 #include <mlx5_glue.h> 17 #include <mlx5_devx_cmds.h> 18 #include <mlx5_malloc.h> 19 20 #include "mlx5.h" 21 #include "mlx5_common_os.h" 22 #include "mlx5_rxtx.h" 23 #include "mlx5_utils.h" 24 #include "mlx5_devx.h" 25 #include "mlx5_flow.h" 26 27 28 /** 29 * Modify RQ vlan stripping offload 30 * 31 * @param rxq_obj 32 * Rx queue object. 33 * 34 * @return 35 * 0 on success, non-0 otherwise 36 */ 37 static int 38 mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on) 39 { 40 struct mlx5_devx_modify_rq_attr rq_attr; 41 42 memset(&rq_attr, 0, sizeof(rq_attr)); 43 rq_attr.rq_state = MLX5_RQC_STATE_RDY; 44 rq_attr.state = MLX5_RQC_STATE_RDY; 45 rq_attr.vsd = (on ? 0 : 1); 46 rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD; 47 return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); 48 } 49 50 /** 51 * Modify RQ using DevX API. 52 * 53 * @param rxq_obj 54 * DevX Rx queue object. 55 * 56 * @return 57 * 0 on success, a negative errno value otherwise and rte_errno is set. 58 */ 59 static int 60 mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start) 61 { 62 struct mlx5_devx_modify_rq_attr rq_attr; 63 64 memset(&rq_attr, 0, sizeof(rq_attr)); 65 if (is_start) { 66 rq_attr.rq_state = MLX5_RQC_STATE_RST; 67 rq_attr.state = MLX5_RQC_STATE_RDY; 68 } else { 69 rq_attr.rq_state = MLX5_RQC_STATE_RDY; 70 rq_attr.state = MLX5_RQC_STATE_RST; 71 } 72 return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr); 73 } 74 75 /** 76 * Release the resources allocated for an RQ DevX object. 77 * 78 * @param rxq_ctrl 79 * DevX Rx queue object. 80 */ 81 static void 82 rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) 83 { 84 struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page; 85 86 if (rxq_ctrl->rxq.wqes) { 87 mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes); 88 rxq_ctrl->rxq.wqes = NULL; 89 } 90 if (rxq_ctrl->wq_umem) { 91 mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem); 92 rxq_ctrl->wq_umem = NULL; 93 } 94 if (dbr_page) { 95 claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs, 96 mlx5_os_get_umem_id(dbr_page->umem), 97 rxq_ctrl->rq_dbr_offset)); 98 rxq_ctrl->rq_dbrec_page = NULL; 99 } 100 } 101 102 /** 103 * Release the resources allocated for the Rx CQ DevX object. 104 * 105 * @param rxq_ctrl 106 * DevX Rx queue object. 107 */ 108 static void 109 rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl) 110 { 111 struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page; 112 113 if (rxq_ctrl->rxq.cqes) { 114 rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes); 115 rxq_ctrl->rxq.cqes = NULL; 116 } 117 if (rxq_ctrl->cq_umem) { 118 mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem); 119 rxq_ctrl->cq_umem = NULL; 120 } 121 if (dbr_page) { 122 claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs, 123 mlx5_os_get_umem_id(dbr_page->umem), 124 rxq_ctrl->cq_dbr_offset)); 125 rxq_ctrl->cq_dbrec_page = NULL; 126 } 127 } 128 129 /** 130 * Release an Rx DevX queue object. 131 * 132 * @param rxq_obj 133 * DevX Rx queue object. 
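 *
 * @note For hairpin queues only the RQ object is reset and destroyed; for
 * regular DevX queues the RQ, the CQ, the optional event channel and the
 * RQ/CQ buffers and door-bell records are released as well.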
134 */ 135 static void 136 mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj) 137 { 138 MLX5_ASSERT(rxq_obj); 139 MLX5_ASSERT(rxq_obj->rq); 140 if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) { 141 mlx5_devx_modify_rq(rxq_obj, false); 142 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); 143 } else { 144 MLX5_ASSERT(rxq_obj->devx_cq); 145 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq)); 146 claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq)); 147 if (rxq_obj->devx_channel) 148 mlx5_glue->devx_destroy_event_channel 149 (rxq_obj->devx_channel); 150 rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl); 151 rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl); 152 } 153 } 154 155 /** 156 * Get event for an Rx DevX queue object. 157 * 158 * @param rxq_obj 159 * DevX Rx queue object. 160 * 161 * @return 162 * 0 on success, a negative errno value otherwise and rte_errno is set. 163 */ 164 static int 165 mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj) 166 { 167 #ifdef HAVE_IBV_DEVX_EVENT 168 union { 169 struct mlx5dv_devx_async_event_hdr event_resp; 170 uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128]; 171 } out; 172 int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel, 173 &out.event_resp, 174 sizeof(out.buf)); 175 176 if (ret < 0) { 177 rte_errno = errno; 178 return -rte_errno; 179 } 180 if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) { 181 rte_errno = EINVAL; 182 return -rte_errno; 183 } 184 return 0; 185 #else 186 (void)rxq_obj; 187 rte_errno = ENOTSUP; 188 return -rte_errno; 189 #endif /* HAVE_IBV_DEVX_EVENT */ 190 } 191 192 /** 193 * Fill common fields of create RQ attributes structure. 194 * 195 * @param rxq_data 196 * Pointer to Rx queue data. 197 * @param cqn 198 * CQ number to use with this RQ. 199 * @param rq_attr 200 * RQ attributes structure to fill.. 201 */ 202 static void 203 mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn, 204 struct mlx5_devx_create_rq_attr *rq_attr) 205 { 206 rq_attr->state = MLX5_RQC_STATE_RST; 207 rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1; 208 rq_attr->cqn = cqn; 209 rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0; 210 } 211 212 /** 213 * Fill common fields of DevX WQ attributes structure. 214 * 215 * @param priv 216 * Pointer to device private data. 217 * @param rxq_ctrl 218 * Pointer to Rx queue control structure. 219 * @param wq_attr 220 * WQ attributes structure to fill.. 221 */ 222 static void 223 mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl, 224 struct mlx5_devx_wq_attr *wq_attr) 225 { 226 wq_attr->end_padding_mode = priv->config.cqe_pad ? 227 MLX5_WQ_END_PAD_MODE_ALIGN : 228 MLX5_WQ_END_PAD_MODE_NONE; 229 wq_attr->pd = priv->sh->pdn; 230 wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset; 231 wq_attr->dbr_umem_id = 232 mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem); 233 wq_attr->dbr_umem_valid = 1; 234 wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem); 235 wq_attr->wq_umem_valid = 1; 236 } 237 238 /** 239 * Create a RQ object using DevX. 240 * 241 * @param dev 242 * Pointer to Ethernet device. 243 * @param idx 244 * Queue index in DPDK Rx queue array. 245 * 246 * @return 247 * The DevX RQ object initialized, NULL otherwise and rte_errno is set. 
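 *
 * @note The WQ is sized to 1 << (elts_n - sges_n) WQEs. With Multi-Packet RQ
 * enabled the per-WQE stride count and stride size are taken from strd_num_n
 * and strd_sz_n, otherwise a plain cyclic WQ of data segments is used.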
248 */ 249 static struct mlx5_devx_obj * 250 rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx) 251 { 252 struct mlx5_priv *priv = dev->data->dev_private; 253 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 254 struct mlx5_rxq_ctrl *rxq_ctrl = 255 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 256 struct mlx5_devx_create_rq_attr rq_attr = { 0 }; 257 uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n); 258 uint32_t cqn = rxq_ctrl->obj->devx_cq->id; 259 struct mlx5_devx_dbr_page *dbr_page; 260 int64_t dbr_offset; 261 uint32_t wq_size = 0; 262 uint32_t wqe_size = 0; 263 uint32_t log_wqe_size = 0; 264 void *buf = NULL; 265 struct mlx5_devx_obj *rq; 266 267 /* Fill RQ attributes. */ 268 rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE; 269 rq_attr.flush_in_error_en = 1; 270 mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr); 271 /* Fill WQ attributes for this RQ. */ 272 if (mlx5_rxq_mprq_enabled(rxq_data)) { 273 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ; 274 /* 275 * Number of strides in each WQE: 276 * 512*2^single_wqe_log_num_of_strides. 277 */ 278 rq_attr.wq_attr.single_wqe_log_num_of_strides = 279 rxq_data->strd_num_n - 280 MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES; 281 /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */ 282 rq_attr.wq_attr.single_stride_log_num_of_bytes = 283 rxq_data->strd_sz_n - 284 MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES; 285 wqe_size = sizeof(struct mlx5_wqe_mprq); 286 } else { 287 rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 288 wqe_size = sizeof(struct mlx5_wqe_data_seg); 289 } 290 log_wqe_size = log2above(wqe_size) + rxq_data->sges_n; 291 rq_attr.wq_attr.log_wq_stride = log_wqe_size; 292 rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n; 293 /* Calculate and allocate WQ memory space. */ 294 wqe_size = 1 << log_wqe_size; /* round up power of two.*/ 295 wq_size = wqe_n * wqe_size; 296 size_t alignment = MLX5_WQE_BUF_ALIGNMENT; 297 if (alignment == (size_t)-1) { 298 DRV_LOG(ERR, "Failed to get mem page size"); 299 rte_errno = ENOMEM; 300 return NULL; 301 } 302 buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size, 303 alignment, rxq_ctrl->socket); 304 if (!buf) 305 return NULL; 306 rxq_data->wqes = buf; 307 rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, 308 buf, wq_size, 0); 309 if (!rxq_ctrl->wq_umem) 310 goto error; 311 /* Allocate RQ door-bell. */ 312 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page); 313 if (dbr_offset < 0) { 314 DRV_LOG(ERR, "Failed to allocate RQ door-bell."); 315 goto error; 316 } 317 rxq_ctrl->rq_dbr_offset = dbr_offset; 318 rxq_ctrl->rq_dbrec_page = dbr_page; 319 rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + 320 (uintptr_t)rxq_ctrl->rq_dbr_offset); 321 /* Create RQ using DevX API. */ 322 mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr); 323 rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket); 324 if (!rq) 325 goto error; 326 return rq; 327 error: 328 rxq_release_devx_rq_resources(rxq_ctrl); 329 return NULL; 330 } 331 332 /** 333 * Create a DevX CQ object for an Rx queue. 334 * 335 * @param dev 336 * Pointer to Ethernet device. 337 * @param idx 338 * Queue index in DPDK Rx queue array. 339 * 340 * @return 341 * The DevX CQ object initialized, NULL otherwise and rte_errno is set. 
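 *
 * @note CQE compression is requested only when it is enabled in the device
 * configuration and the queue uses neither HW timestamps nor LRO. The CQE
 * buffer is initialized to all ones to mark HW ownership of every entry.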
342 */ 343 static struct mlx5_devx_obj * 344 rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx) 345 { 346 struct mlx5_devx_obj *cq_obj = 0; 347 struct mlx5_devx_cq_attr cq_attr = { 0 }; 348 struct mlx5_priv *priv = dev->data->dev_private; 349 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 350 struct mlx5_rxq_ctrl *rxq_ctrl = 351 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 352 size_t page_size = rte_mem_page_size(); 353 unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data); 354 struct mlx5_devx_dbr_page *dbr_page; 355 int64_t dbr_offset; 356 void *buf = NULL; 357 uint16_t event_nums[1] = {0}; 358 uint32_t log_cqe_n; 359 uint32_t cq_size; 360 int ret = 0; 361 362 if (page_size == (size_t)-1) { 363 DRV_LOG(ERR, "Failed to get page_size."); 364 goto error; 365 } 366 if (priv->config.cqe_comp && !rxq_data->hw_timestamp && 367 !rxq_data->lro) { 368 cq_attr.cqe_comp_en = 1u; 369 cq_attr.mini_cqe_res_format = 370 mlx5_rxq_mprq_enabled(rxq_data) ? 371 MLX5_CQE_RESP_FORMAT_CSUM_STRIDX : 372 MLX5_CQE_RESP_FORMAT_HASH; 373 /* 374 * For vectorized Rx, it must not be doubled in order to 375 * make cq_ci and rq_ci aligned. 376 */ 377 if (mlx5_rxq_check_vec_support(rxq_data) < 0) 378 cqe_n *= 2; 379 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) { 380 DRV_LOG(DEBUG, 381 "Port %u Rx CQE compression is disabled for HW" 382 " timestamp.", 383 dev->data->port_id); 384 } else if (priv->config.cqe_comp && rxq_data->lro) { 385 DRV_LOG(DEBUG, 386 "Port %u Rx CQE compression is disabled for LRO.", 387 dev->data->port_id); 388 } 389 if (priv->config.cqe_pad) 390 cq_attr.cqe_size = MLX5_CQE_SIZE_128B; 391 log_cqe_n = log2above(cqe_n); 392 cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n); 393 buf = rte_calloc_socket(__func__, 1, cq_size, page_size, 394 rxq_ctrl->socket); 395 if (!buf) { 396 DRV_LOG(ERR, "Failed to allocate memory for CQ."); 397 goto error; 398 } 399 rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf; 400 rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf, 401 cq_size, 402 IBV_ACCESS_LOCAL_WRITE); 403 if (!rxq_ctrl->cq_umem) { 404 DRV_LOG(ERR, "Failed to register umem for CQ."); 405 goto error; 406 } 407 /* Allocate CQ door-bell. */ 408 dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page); 409 if (dbr_offset < 0) { 410 DRV_LOG(ERR, "Failed to allocate CQ door-bell."); 411 goto error; 412 } 413 rxq_ctrl->cq_dbr_offset = dbr_offset; 414 rxq_ctrl->cq_dbrec_page = dbr_page; 415 rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs + 416 (uintptr_t)rxq_ctrl->cq_dbr_offset); 417 rxq_data->cq_uar = 418 mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar); 419 /* Create CQ using DevX API. 
*/ 420 cq_attr.eqn = priv->sh->eqn; 421 cq_attr.uar_page_id = 422 mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar); 423 cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem); 424 cq_attr.q_umem_valid = 1; 425 cq_attr.log_cq_size = log_cqe_n; 426 cq_attr.log_page_size = rte_log2_u32(page_size); 427 cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset; 428 cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem); 429 cq_attr.db_umem_valid = 1; 430 cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr); 431 if (!cq_obj) 432 goto error; 433 rxq_data->cqe_n = log_cqe_n; 434 rxq_data->cqn = cq_obj->id; 435 if (rxq_ctrl->obj->devx_channel) { 436 ret = mlx5_glue->devx_subscribe_devx_event 437 (rxq_ctrl->obj->devx_channel, 438 cq_obj->obj, 439 sizeof(event_nums), 440 event_nums, 441 (uint64_t)(uintptr_t)cq_obj); 442 if (ret) { 443 DRV_LOG(ERR, "Fail to subscribe CQ to event channel."); 444 rte_errno = errno; 445 goto error; 446 } 447 } 448 /* Initialise CQ to 1's to mark HW ownership for all CQEs. */ 449 memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size); 450 return cq_obj; 451 error: 452 if (cq_obj) 453 mlx5_devx_cmd_destroy(cq_obj); 454 rxq_release_devx_cq_resources(rxq_ctrl); 455 return NULL; 456 } 457 458 /** 459 * Create the Rx hairpin queue object. 460 * 461 * @param dev 462 * Pointer to Ethernet device. 463 * @param idx 464 * Queue index in DPDK Rx queue array. 465 * 466 * @return 467 * 0 on success, a negative errno value otherwise and rte_errno is set. 468 */ 469 static int 470 mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) 471 { 472 struct mlx5_priv *priv = dev->data->dev_private; 473 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 474 struct mlx5_rxq_ctrl *rxq_ctrl = 475 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 476 struct mlx5_devx_create_rq_attr attr = { 0 }; 477 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 478 uint32_t max_wq_data; 479 480 MLX5_ASSERT(rxq_data); 481 MLX5_ASSERT(tmpl); 482 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN; 483 tmpl->rxq_ctrl = rxq_ctrl; 484 attr.hairpin = 1; 485 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; 486 /* Jumbo frames > 9KB should be supported, and more packets. */ 487 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 488 if (priv->config.log_hp_size > max_wq_data) { 489 DRV_LOG(ERR, "Total data size %u power of 2 is " 490 "too large for hairpin.", 491 priv->config.log_hp_size); 492 rte_errno = ERANGE; 493 return -rte_errno; 494 } 495 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 496 } else { 497 attr.wq_attr.log_hairpin_data_sz = 498 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 499 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 500 } 501 /* Set the packets number to the maximum value for performance. */ 502 attr.wq_attr.log_hairpin_num_packets = 503 attr.wq_attr.log_hairpin_data_sz - 504 MLX5_HAIRPIN_QUEUE_STRIDE; 505 tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr, 506 rxq_ctrl->socket); 507 if (!tmpl->rq) { 508 DRV_LOG(ERR, 509 "Port %u Rx hairpin queue %u can't create rq object.", 510 dev->data->port_id, idx); 511 rte_errno = errno; 512 return -rte_errno; 513 } 514 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN; 515 return 0; 516 } 517 518 /** 519 * Create the Rx queue DevX object. 520 * 521 * @param dev 522 * Pointer to Ethernet device. 523 * @param idx 524 * Queue index in DPDK Rx queue array. 525 * 526 * @return 527 * 0 on success, a negative errno value otherwise and rte_errno is set. 
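 *
 * @note Creation order is: event channel (only when Rx interrupts are
 * enabled), then the CQ, then the RQ, which is finally moved to the ready
 * state. On failure every object created so far is destroyed.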
528 */ 529 static int 530 mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) 531 { 532 struct mlx5_priv *priv = dev->data->dev_private; 533 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; 534 struct mlx5_rxq_ctrl *rxq_ctrl = 535 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 536 struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj; 537 int ret = 0; 538 539 MLX5_ASSERT(rxq_data); 540 MLX5_ASSERT(tmpl); 541 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) 542 return mlx5_rxq_obj_hairpin_new(dev, idx); 543 tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ; 544 tmpl->rxq_ctrl = rxq_ctrl; 545 if (rxq_ctrl->irq) { 546 int devx_ev_flag = 547 MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA; 548 549 tmpl->devx_channel = mlx5_glue->devx_create_event_channel 550 (priv->sh->ctx, 551 devx_ev_flag); 552 if (!tmpl->devx_channel) { 553 rte_errno = errno; 554 DRV_LOG(ERR, "Failed to create event channel %d.", 555 rte_errno); 556 goto error; 557 } 558 tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel); 559 } 560 /* Create CQ using DevX API. */ 561 tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx); 562 if (!tmpl->devx_cq) { 563 DRV_LOG(ERR, "Failed to create CQ."); 564 goto error; 565 } 566 /* Create RQ using DevX API. */ 567 tmpl->rq = rxq_create_devx_rq_resources(dev, idx); 568 if (!tmpl->rq) { 569 DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.", 570 dev->data->port_id, idx); 571 rte_errno = ENOMEM; 572 goto error; 573 } 574 /* Change queue state to ready. */ 575 ret = mlx5_devx_modify_rq(tmpl, true); 576 if (ret) 577 goto error; 578 rxq_data->cq_arm_sn = 0; 579 mlx5_rxq_initialize(rxq_data); 580 rxq_data->cq_ci = 0; 581 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; 582 rxq_ctrl->wqn = tmpl->rq->id; 583 return 0; 584 error: 585 ret = rte_errno; /* Save rte_errno before cleanup. */ 586 if (tmpl->rq) 587 claim_zero(mlx5_devx_cmd_destroy(tmpl->rq)); 588 if (tmpl->devx_cq) 589 claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq)); 590 if (tmpl->devx_channel) 591 mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel); 592 rxq_release_devx_rq_resources(rxq_ctrl); 593 rxq_release_devx_cq_resources(rxq_ctrl); 594 rte_errno = ret; /* Restore rte_errno. */ 595 return -rte_errno; 596 } 597 598 /** 599 * Create RQT using DevX API as a field of the indirection table. 600 * 601 * @param dev 602 * Pointer to Ethernet device. 603 * @param log_n 604 * Log of number of queues in the array. 605 * @param ind_tbl 606 * DevX indirection table object. 607 * 608 * @return 609 * 0 on success, a negative errno value otherwise and rte_errno is set.
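 *
 * @note When 1 << log_n exceeds the number of configured queues, the
 * remaining RQT entries wrap around the configured RQs. For example, with
 * 4 queues and log_n == 6, entries 4..63 repeat the first four RQ IDs
 * cyclically.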
610 */ 611 static int 612 mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n, 613 struct mlx5_ind_table_obj *ind_tbl) 614 { 615 struct mlx5_priv *priv = dev->data->dev_private; 616 struct mlx5_devx_rqt_attr *rqt_attr = NULL; 617 const unsigned int rqt_n = 1 << log_n; 618 unsigned int i, j; 619 620 MLX5_ASSERT(ind_tbl); 621 rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) + 622 rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY); 623 if (!rqt_attr) { 624 DRV_LOG(ERR, "Port %u cannot allocate RQT resources.", 625 dev->data->port_id); 626 rte_errno = ENOMEM; 627 return -rte_errno; 628 } 629 rqt_attr->rqt_max_size = priv->config.ind_table_max_size; 630 rqt_attr->rqt_actual_size = rqt_n; 631 for (i = 0; i != ind_tbl->queues_n; ++i) { 632 struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]]; 633 struct mlx5_rxq_ctrl *rxq_ctrl = 634 container_of(rxq, struct mlx5_rxq_ctrl, rxq); 635 636 rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id; 637 } 638 MLX5_ASSERT(i > 0); 639 for (j = 0; i != rqt_n; ++j, ++i) 640 rqt_attr->rq_list[i] = rqt_attr->rq_list[j]; 641 ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr); 642 mlx5_free(rqt_attr); 643 if (!ind_tbl->rqt) { 644 DRV_LOG(ERR, "Port %u cannot create DevX RQT.", 645 dev->data->port_id); 646 rte_errno = errno; 647 return -rte_errno; 648 } 649 return 0; 650 } 651 652 /** 653 * Destroy the DevX RQT object. 654 * 655 * @param ind_table 656 * Indirection table to release. 657 */ 658 static void 659 mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl) 660 { 661 claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt)); 662 } 663 664 /** 665 * Create an Rx Hash queue. 666 * 667 * @param dev 668 * Pointer to Ethernet device. 669 * @param hrxq 670 * Pointer to Rx Hash queue. 671 * @param tunnel 672 * Tunnel type. 673 * 674 * @return 675 * 0 on success, a negative errno value otherwise and rte_errno is set. 676 */ 677 static int 678 mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq, 679 int tunnel __rte_unused) 680 { 681 struct mlx5_priv *priv = dev->data->dev_private; 682 struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table; 683 struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]]; 684 struct mlx5_rxq_ctrl *rxq_ctrl = 685 container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); 686 struct mlx5_devx_tir_attr tir_attr; 687 const uint8_t *rss_key = hrxq->rss_key; 688 uint64_t hash_fields = hrxq->hash_fields; 689 bool lro = true; 690 uint32_t i; 691 int err; 692 693 /* Enable TIR LRO only if all the queues were configured for. */ 694 for (i = 0; i < ind_tbl->queues_n; ++i) { 695 if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) { 696 lro = false; 697 break; 698 } 699 } 700 memset(&tir_attr, 0, sizeof(tir_attr)); 701 tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT; 702 tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ; 703 tir_attr.tunneled_offload_en = !!tunnel; 704 /* If needed, translate hash_fields bitmap to PRM format. */ 705 if (hash_fields) { 706 struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL; 707 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT 708 rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ? 709 &tir_attr.rx_hash_field_selector_inner : 710 &tir_attr.rx_hash_field_selector_outer; 711 #else 712 rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer; 713 #endif 714 /* 1 bit: 0: IPv4, 1: IPv6. */ 715 rx_hash_field_select->l3_prot_type = 716 !!(hash_fields & MLX5_IPV6_IBV_RX_HASH); 717 /* 1 bit: 0: TCP, 1: UDP. 
*/ 718 rx_hash_field_select->l4_prot_type = 719 !!(hash_fields & MLX5_UDP_IBV_RX_HASH); 720 /* Bitmask which sets which fields to use in RX Hash. */ 721 rx_hash_field_select->selected_fields = 722 ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) << 723 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) | 724 (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) << 725 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP | 726 (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) << 727 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT | 728 (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) << 729 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT; 730 } 731 if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) 732 tir_attr.transport_domain = priv->sh->td->id; 733 else 734 tir_attr.transport_domain = priv->sh->tdn; 735 memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN); 736 tir_attr.indirect_table = ind_tbl->rqt->id; 737 if (dev->data->dev_conf.lpbk_mode) 738 tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; 739 if (lro) { 740 tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout; 741 tir_attr.lro_max_msg_sz = priv->max_lro_msg_size; 742 tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | 743 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO; 744 } 745 hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr); 746 if (!hrxq->tir) { 747 DRV_LOG(ERR, "Port %u cannot create DevX TIR.", 748 dev->data->port_id); 749 rte_errno = errno; 750 goto error; 751 } 752 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 753 hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir 754 (hrxq->tir->obj); 755 if (!hrxq->action) { 756 rte_errno = errno; 757 goto error; 758 } 759 #endif 760 return 0; 761 error: 762 err = rte_errno; /* Save rte_errno before cleanup. */ 763 if (hrxq->tir) 764 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 765 rte_errno = err; /* Restore rte_errno. */ 766 return -rte_errno; 767 } 768 769 /** 770 * Destroy a DevX TIR object. 771 * 772 * @param hrxq 773 * Hash Rx queue to release its tir. 774 */ 775 static void 776 mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq) 777 { 778 claim_zero(mlx5_devx_cmd_destroy(hrxq->tir)); 779 } 780 781 /** 782 * Create a DevX drop action for Rx Hash queue. 783 * 784 * @param dev 785 * Pointer to Ethernet device. 786 * 787 * @return 788 * 0 on success, a negative errno value otherwise and rte_errno is set. 789 */ 790 static int 791 mlx5_devx_drop_action_create(struct rte_eth_dev *dev) 792 { 793 (void)dev; 794 DRV_LOG(ERR, "DevX drop action is not supported yet."); 795 rte_errno = ENOTSUP; 796 return -rte_errno; 797 } 798 799 /** 800 * Release a drop hash Rx queue. 801 * 802 * @param dev 803 * Pointer to Ethernet device. 804 */ 805 static void 806 mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev) 807 { 808 (void)dev; 809 DRV_LOG(ERR, "DevX drop action is not supported yet."); 810 rte_errno = ENOTSUP; 811 } 812 813 /** 814 * Create the Tx hairpin queue object. 815 * 816 * @param dev 817 * Pointer to Ethernet device. 818 * @param idx 819 * Queue index in DPDK Tx queue array. 820 * 821 * @return 822 * The hairpin DevX object initialized, NULL otherwise and rte_errno is set. 
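 *
 * @note The number of hairpin packet slots is derived from the data size:
 * log_hairpin_num_packets = log_hairpin_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE,
 * i.e. one slot per stride.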
823 */ 824 static struct mlx5_txq_obj * 825 mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx) 826 { 827 struct mlx5_priv *priv = dev->data->dev_private; 828 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 829 struct mlx5_txq_ctrl *txq_ctrl = 830 container_of(txq_data, struct mlx5_txq_ctrl, txq); 831 struct mlx5_devx_create_sq_attr attr = { 0 }; 832 struct mlx5_txq_obj *tmpl = NULL; 833 uint32_t max_wq_data; 834 835 MLX5_ASSERT(txq_data); 836 MLX5_ASSERT(!txq_ctrl->obj); 837 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0, 838 txq_ctrl->socket); 839 if (!tmpl) { 840 DRV_LOG(ERR, 841 "Port %u Tx queue %u cannot allocate memory resources.", 842 dev->data->port_id, txq_data->idx); 843 rte_errno = ENOMEM; 844 return NULL; 845 } 846 tmpl->type = MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN; 847 tmpl->txq_ctrl = txq_ctrl; 848 attr.hairpin = 1; 849 attr.tis_lst_sz = 1; 850 max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz; 851 /* Jumbo frames > 9KB should be supported, and more packets. */ 852 if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) { 853 if (priv->config.log_hp_size > max_wq_data) { 854 DRV_LOG(ERR, "Total data size %u power of 2 is " 855 "too large for hairpin.", 856 priv->config.log_hp_size); 857 mlx5_free(tmpl); 858 rte_errno = ERANGE; 859 return NULL; 860 } 861 attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size; 862 } else { 863 attr.wq_attr.log_hairpin_data_sz = 864 (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ? 865 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE; 866 } 867 /* Set the packets number to the maximum value for performance. */ 868 attr.wq_attr.log_hairpin_num_packets = 869 attr.wq_attr.log_hairpin_data_sz - 870 MLX5_HAIRPIN_QUEUE_STRIDE; 871 attr.tis_num = priv->sh->tis->id; 872 tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr); 873 if (!tmpl->sq) { 874 DRV_LOG(ERR, 875 "Port %u Tx hairpin queue %u can't create SQ object.", 876 dev->data->port_id, idx); 877 mlx5_free(tmpl); 878 rte_errno = errno; 879 return NULL; 880 } 881 DRV_LOG(DEBUG, "Port %u txq %u updated with %p.", dev->data->port_id, 882 idx, (void *)tmpl); 883 LIST_INSERT_HEAD(&priv->txqsobj, tmpl, next); 884 return tmpl; 885 } 886 887 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET 888 /** 889 * Release DevX SQ resources. 890 * 891 * @param txq_obj 892 * DevX Tx queue object. 893 */ 894 static void 895 txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj) 896 { 897 if (txq_obj->sq_devx) 898 claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx)); 899 if (txq_obj->sq_umem) 900 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem)); 901 if (txq_obj->sq_buf) 902 mlx5_free(txq_obj->sq_buf); 903 if (txq_obj->sq_dbrec_page) 904 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs, 905 mlx5_os_get_umem_id 906 (txq_obj->sq_dbrec_page->umem), 907 txq_obj->sq_dbrec_offset)); 908 } 909 910 /** 911 * Release DevX Tx CQ resources. 912 * 913 * @param txq_obj 914 * DevX Tx queue object. 915 */ 916 static void 917 txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj) 918 { 919 if (txq_obj->cq_devx) 920 claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx)); 921 if (txq_obj->cq_umem) 922 claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->cq_umem)); 923 if (txq_obj->cq_buf) 924 mlx5_free(txq_obj->cq_buf); 925 if (txq_obj->cq_dbrec_page) 926 claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs, 927 mlx5_os_get_umem_id 928 (txq_obj->cq_dbrec_page->umem), 929 txq_obj->cq_dbrec_offset)); 930 } 931 932 /** 933 * Destroy the Tx queue DevX object.
934 * 935 * @param txq_obj 936 * Txq object to destroy. 937 */ 938 static void 939 txq_release_devx_resources(struct mlx5_txq_obj *txq_obj) 940 { 941 MLX5_ASSERT(txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_SQ); 942 943 txq_release_devx_cq_resources(txq_obj); 944 txq_release_devx_sq_resources(txq_obj); 945 } 946 947 /** 948 * Create a DevX CQ object for a Tx queue. 949 * 950 * @param dev 951 * Pointer to Ethernet device. 952 * @param cqe_n 953 * Number of entries in the CQ. 954 * @param idx 955 * Queue index in DPDK Tx queue array. 956 * @param txq_obj 957 * Pointer to Tx queue object data. 958 * 959 * @return 960 * The DevX CQ object initialized, NULL otherwise and rte_errno is set. 961 */ 962 static struct mlx5_devx_obj * 963 mlx5_tx_devx_cq_new(struct rte_eth_dev *dev, uint32_t cqe_n, uint16_t idx, 964 struct mlx5_txq_obj *txq_obj) 965 { 966 struct mlx5_priv *priv = dev->data->dev_private; 967 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 968 struct mlx5_devx_obj *cq_obj = NULL; 969 struct mlx5_devx_cq_attr cq_attr = { 0 }; 970 struct mlx5_cqe *cqe; 971 size_t page_size; 972 size_t alignment; 973 uint32_t i; 974 int ret; 975 976 MLX5_ASSERT(txq_data); 977 MLX5_ASSERT(txq_obj); 978 page_size = rte_mem_page_size(); 979 if (page_size == (size_t)-1) { 980 DRV_LOG(ERR, "Failed to get mem page size."); 981 rte_errno = ENOMEM; 982 return NULL; 983 } 984 /* Allocate memory buffer for CQEs. */ 985 alignment = MLX5_CQE_BUF_ALIGNMENT; 986 if (alignment == (size_t)-1) { 987 DRV_LOG(ERR, "Failed to get CQE buf alignment."); 988 rte_errno = ENOMEM; 989 return NULL; 990 } 991 cqe_n = 1UL << log2above(cqe_n); 992 if (cqe_n > UINT16_MAX) { 993 DRV_LOG(ERR, 994 "Port %u Tx queue %u requests too many CQEs %u.", 995 dev->data->port_id, txq_data->idx, cqe_n); 996 rte_errno = EINVAL; 997 return NULL; 998 } 999 txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1000 cqe_n * sizeof(struct mlx5_cqe), 1001 alignment, 1002 priv->sh->numa_node); 1003 if (!txq_obj->cq_buf) { 1004 DRV_LOG(ERR, 1005 "Port %u Tx queue %u cannot allocate memory (CQ).", 1006 dev->data->port_id, txq_data->idx); 1007 rte_errno = ENOMEM; 1008 return NULL; 1009 } 1010 /* Register allocated buffer in user space with DevX. */ 1011 txq_obj->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, 1012 (void *)txq_obj->cq_buf, 1013 cqe_n * sizeof(struct mlx5_cqe), 1014 IBV_ACCESS_LOCAL_WRITE); 1015 if (!txq_obj->cq_umem) { 1016 rte_errno = errno; 1017 DRV_LOG(ERR, 1018 "Port %u Tx queue %u cannot register memory (CQ).", 1019 dev->data->port_id, txq_data->idx); 1020 goto error; 1021 } 1022 /* Allocate doorbell record for completion queue. */ 1023 txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx, 1024 &priv->dbrpgs, 1025 &txq_obj->cq_dbrec_page); 1026 if (txq_obj->cq_dbrec_offset < 0) { 1027 rte_errno = errno; 1028 DRV_LOG(ERR, "Failed to allocate CQ door-bell."); 1029 goto error; 1030 } 1031 cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
1032 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B; 1033 cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar); 1034 cq_attr.eqn = priv->sh->eqn; 1035 cq_attr.q_umem_valid = 1; 1036 cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size; 1037 cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem); 1038 cq_attr.db_umem_valid = 1; 1039 cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset; 1040 cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem); 1041 cq_attr.log_cq_size = rte_log2_u32(cqe_n); 1042 cq_attr.log_page_size = rte_log2_u32(page_size); 1043 /* Create completion queue object with DevX. */ 1044 cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr); 1045 if (!cq_obj) { 1046 rte_errno = errno; 1047 DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.", 1048 dev->data->port_id, idx); 1049 goto error; 1050 } 1051 txq_data->cqe_n = log2above(cqe_n); 1052 txq_data->cqe_s = 1 << txq_data->cqe_n; 1053 /* Initially fill the CQ buffer with the invalid CQE opcode. */ 1054 cqe = (struct mlx5_cqe *)txq_obj->cq_buf; 1055 for (i = 0; i < txq_data->cqe_s; i++) { 1056 cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK; 1057 ++cqe; 1058 } 1059 return cq_obj; 1060 error: 1061 ret = rte_errno; 1062 txq_release_devx_cq_resources(txq_obj); 1063 rte_errno = ret; 1064 return NULL; 1065 } 1066 1067 /** 1068 * Create an SQ object using DevX. 1069 * 1070 * @param dev 1071 * Pointer to Ethernet device. 1072 * @param idx 1073 * Queue index in DPDK Tx queue array. 1074 * @param txq_obj 1075 * Pointer to Tx queue object data. 1076 * 1077 * @return 1078 * The DevX SQ object initialized, NULL otherwise and rte_errno is set. 1079 */ 1080 static struct mlx5_devx_obj * 1081 mlx5_devx_sq_new(struct rte_eth_dev *dev, uint16_t idx, 1082 struct mlx5_txq_obj *txq_obj) 1083 { 1084 struct mlx5_priv *priv = dev->data->dev_private; 1085 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 1086 struct mlx5_devx_create_sq_attr sq_attr = { 0 }; 1087 struct mlx5_devx_obj *sq_obj = NULL; 1088 size_t page_size; 1089 uint32_t wqe_n; 1090 int ret; 1091 1092 MLX5_ASSERT(txq_data); 1093 MLX5_ASSERT(txq_obj); 1094 page_size = rte_mem_page_size(); 1095 if (page_size == (size_t)-1) { 1096 DRV_LOG(ERR, "Failed to get mem page size."); 1097 rte_errno = ENOMEM; 1098 return NULL; 1099 } 1100 wqe_n = RTE_MIN(1UL << txq_data->elts_n, 1101 (uint32_t)priv->sh->device_attr.max_qp_wr); 1102 txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1103 wqe_n * sizeof(struct mlx5_wqe), 1104 page_size, priv->sh->numa_node); 1105 if (!txq_obj->sq_buf) { 1106 DRV_LOG(ERR, 1107 "Port %u Tx queue %u cannot allocate memory (SQ).", 1108 dev->data->port_id, txq_data->idx); 1109 rte_errno = ENOMEM; 1110 goto error; 1111 } 1112 /* Register allocated buffer in user space with DevX. */ 1113 txq_obj->sq_umem = mlx5_glue->devx_umem_reg 1114 (priv->sh->ctx, 1115 (void *)txq_obj->sq_buf, 1116 wqe_n * sizeof(struct mlx5_wqe), 1117 IBV_ACCESS_LOCAL_WRITE); 1118 if (!txq_obj->sq_umem) { 1119 rte_errno = errno; 1120 DRV_LOG(ERR, 1121 "Port %u Tx queue %u cannot register memory (SQ).", 1122 dev->data->port_id, txq_data->idx); 1123 goto error; 1124 } 1125 /* Allocate doorbell record for send queue.
*/ 1126 txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx, 1127 &priv->dbrpgs, 1128 &txq_obj->sq_dbrec_page); 1129 if (txq_obj->sq_dbrec_offset < 0) { 1130 rte_errno = errno; 1131 DRV_LOG(ERR, "Failed to allocate SQ door-bell."); 1132 goto error; 1133 } 1134 txq_data->wqe_n = log2above(wqe_n); 1135 sq_attr.tis_lst_sz = 1; 1136 sq_attr.tis_num = priv->sh->tis->id; 1137 sq_attr.state = MLX5_SQC_STATE_RST; 1138 sq_attr.cqn = txq_obj->cq_devx->id; 1139 sq_attr.flush_in_error_en = 1; 1140 sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps; 1141 sq_attr.allow_swp = !!priv->config.swp; 1142 sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode; 1143 sq_attr.wq_attr.uar_page = 1144 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar); 1145 sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC; 1146 sq_attr.wq_attr.pd = priv->sh->pdn; 1147 sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE); 1148 sq_attr.wq_attr.log_wq_sz = txq_data->wqe_n; 1149 sq_attr.wq_attr.dbr_umem_valid = 1; 1150 sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset; 1151 sq_attr.wq_attr.dbr_umem_id = 1152 mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem); 1153 sq_attr.wq_attr.wq_umem_valid = 1; 1154 sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem); 1155 sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size; 1156 /* Create Send Queue object with DevX. */ 1157 sq_obj = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr); 1158 if (!sq_obj) { 1159 rte_errno = errno; 1160 DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.", 1161 dev->data->port_id, idx); 1162 goto error; 1163 } 1164 return sq_obj; 1165 error: 1166 ret = rte_errno; 1167 txq_release_devx_sq_resources(txq_obj); 1168 rte_errno = ret; 1169 return NULL; 1170 } 1171 #endif 1172 1173 /** 1174 * Create the Tx queue DevX object. 1175 * 1176 * @param dev 1177 * Pointer to Ethernet device. 1178 * @param idx 1179 * Queue index in DPDK Tx queue array. 1180 * 1181 * @return 1182 * The DevX object initialized, NULL otherwise and rte_errno is set. 1183 */ 1184 struct mlx5_txq_obj * 1185 mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx) 1186 { 1187 struct mlx5_priv *priv = dev->data->dev_private; 1188 struct mlx5_txq_data *txq_data = (*priv->txqs)[idx]; 1189 struct mlx5_txq_ctrl *txq_ctrl = 1190 container_of(txq_data, struct mlx5_txq_ctrl, txq); 1191 1192 if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) 1193 return mlx5_txq_obj_hairpin_new(dev, idx); 1194 #ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET 1195 DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.", 1196 dev->data->port_id, idx); 1197 rte_errno = ENOMEM; 1198 return NULL; 1199 #else 1200 struct mlx5_dev_ctx_shared *sh = priv->sh; 1201 struct mlx5_devx_modify_sq_attr msq_attr = { 0 }; 1202 struct mlx5_txq_obj *txq_obj = NULL; 1203 void *reg_addr; 1204 uint32_t cqe_n; 1205 int ret = 0; 1206 1207 MLX5_ASSERT(txq_data); 1208 MLX5_ASSERT(!txq_ctrl->obj); 1209 txq_obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1210 sizeof(struct mlx5_txq_obj), 0, 1211 txq_ctrl->socket); 1212 if (!txq_obj) { 1213 DRV_LOG(ERR, 1214 "Port %u Tx queue %u cannot allocate memory resources.", 1215 dev->data->port_id, txq_data->idx); 1216 rte_errno = ENOMEM; 1217 return NULL; 1218 } 1219 txq_obj->type = MLX5_TXQ_OBJ_TYPE_DEVX_SQ; 1220 txq_obj->txq_ctrl = txq_ctrl; 1221 txq_obj->dev = dev; 1222 /* Create the Completion Queue. */ 1223 cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH + 1224 1 + MLX5_TX_COMP_THRESH_INLINE_DIV; 1225 /* Create completion queue object with DevX. 
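 * The CQ size is derived just above from the number of Tx descriptors:
 * roughly one completion per MLX5_TX_COMP_THRESH descriptors plus
 * MLX5_TX_COMP_THRESH_INLINE_DIV spare entries; mlx5_tx_devx_cq_new()
 * rounds it up to a power of two.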
*/ 1226 txq_obj->cq_devx = mlx5_tx_devx_cq_new(dev, cqe_n, idx, txq_obj); 1227 if (!txq_obj->cq_devx) { 1228 rte_errno = errno; 1229 goto error; 1230 } 1231 txq_data->cqe_m = txq_data->cqe_s - 1; 1232 txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf; 1233 txq_data->cq_ci = 0; 1234 txq_data->cq_pi = 0; 1235 txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs + 1236 txq_obj->cq_dbrec_offset); 1237 *txq_data->cq_db = 0; 1238 /* Create Send Queue object with DevX. */ 1239 txq_obj->sq_devx = mlx5_devx_sq_new(dev, idx, txq_obj); 1240 if (!txq_obj->sq_devx) { 1241 rte_errno = errno; 1242 goto error; 1243 } 1244 /* Create the Work Queue. */ 1245 txq_data->wqe_s = 1 << txq_data->wqe_n; 1246 txq_data->wqe_m = txq_data->wqe_s - 1; 1247 txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf; 1248 txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s; 1249 txq_data->wqe_ci = 0; 1250 txq_data->wqe_pi = 0; 1251 txq_data->wqe_comp = 0; 1252 txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV; 1253 txq_data->qp_db = (volatile uint32_t *) 1254 (txq_obj->sq_dbrec_page->dbrs + 1255 txq_obj->sq_dbrec_offset + 1256 MLX5_SND_DBR * sizeof(uint32_t)); 1257 *txq_data->qp_db = 0; 1258 txq_data->qp_num_8s = txq_obj->sq_devx->id << 8; 1259 /* Change Send Queue state to Ready-to-Send. */ 1260 msq_attr.sq_state = MLX5_SQC_STATE_RST; 1261 msq_attr.state = MLX5_SQC_STATE_RDY; 1262 ret = mlx5_devx_cmd_modify_sq(txq_obj->sq_devx, &msq_attr); 1263 if (ret) { 1264 rte_errno = errno; 1265 DRV_LOG(ERR, 1266 "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.", 1267 dev->data->port_id, idx); 1268 goto error; 1269 } 1270 txq_data->fcqs = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1271 txq_data->cqe_s * sizeof(*txq_data->fcqs), 1272 RTE_CACHE_LINE_SIZE, 1273 txq_ctrl->socket); 1274 if (!txq_data->fcqs) { 1275 DRV_LOG(ERR, 1276 "Port %u Tx queue %u cannot allocate memory (FCQ).", 1277 dev->data->port_id, idx); 1278 rte_errno = ENOMEM; 1279 goto error; 1280 } 1281 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 1282 /* 1283 * If using DevX, the TIS transport domain value needs to be queried and 1284 * stored. This is done once per port. 1285 * The value will be used on Rx, when creating a matching TIR. 1286 */ 1287 if (!priv->sh->tdn) 1288 priv->sh->tdn = priv->sh->td->id; 1289 #endif 1290 MLX5_ASSERT(sh->tx_uar); 1291 reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar); 1292 MLX5_ASSERT(reg_addr); 1293 txq_ctrl->bf_reg = reg_addr; 1294 txq_ctrl->uar_mmap_offset = 1295 mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar); 1296 txq_uar_init(txq_ctrl); 1297 LIST_INSERT_HEAD(&priv->txqsobj, txq_obj, next); 1298 return txq_obj; 1299 error: 1300 ret = rte_errno; /* Save rte_errno before cleanup. */ 1301 txq_release_devx_resources(txq_obj); 1302 if (txq_data->fcqs) { 1303 mlx5_free(txq_data->fcqs); 1304 txq_data->fcqs = NULL; 1305 } 1306 mlx5_free(txq_obj); 1307 rte_errno = ret; /* Restore rte_errno. */ 1308 return NULL; 1309 #endif 1310 } 1311 1312 /** 1313 * Release a Tx DevX queue object. 1314 * 1315 * @param txq_obj 1316 * DevX Tx queue object.
1317 */ 1318 void 1319 mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj) 1320 { 1321 MLX5_ASSERT(txq_obj); 1322 if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) { 1323 if (txq_obj->tis) 1324 claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis)); 1325 #ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET 1326 } else { 1327 txq_release_devx_resources(txq_obj); 1328 #endif 1329 } 1330 if (txq_obj->txq_ctrl->txq.fcqs) { 1331 mlx5_free(txq_obj->txq_ctrl->txq.fcqs); 1332 txq_obj->txq_ctrl->txq.fcqs = NULL; 1333 } 1334 LIST_REMOVE(txq_obj, next); 1335 mlx5_free(txq_obj); 1336 } 1337 1338 struct mlx5_obj_ops devx_obj_ops = { 1339 .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip, 1340 .rxq_obj_new = mlx5_rxq_devx_obj_new, 1341 .rxq_event_get = mlx5_rx_devx_get_event, 1342 .rxq_obj_modify = mlx5_devx_modify_rq, 1343 .rxq_obj_release = mlx5_rxq_devx_obj_release, 1344 .ind_table_new = mlx5_devx_ind_table_new, 1345 .ind_table_destroy = mlx5_devx_ind_table_destroy, 1346 .hrxq_new = mlx5_devx_hrxq_new, 1347 .hrxq_destroy = mlx5_devx_tir_destroy, 1348 .drop_action_create = mlx5_devx_drop_action_create, 1349 .drop_action_destroy = mlx5_devx_drop_action_destroy, 1350 .txq_obj_new = mlx5_txq_devx_obj_new, 1351 .txq_obj_release = mlx5_txq_devx_obj_release, 1352 }; 1353