1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2015 6WIND S.A. 3 * Copyright 2015 Mellanox Technologies, Ltd 4 */ 5 6 #include <stddef.h> 7 #include <errno.h> 8 #include <string.h> 9 #include <stdint.h> 10 #include <fcntl.h> 11 #include <sys/queue.h> 12 13 #include <rte_mbuf.h> 14 #include <rte_malloc.h> 15 #include <ethdev_driver.h> 16 #include <rte_common.h> 17 #include <rte_interrupts.h> 18 #include <rte_debug.h> 19 #include <rte_io.h> 20 #include <rte_eal_paging.h> 21 22 #include <mlx5_glue.h> 23 #include <mlx5_malloc.h> 24 #include <mlx5_common.h> 25 #include <mlx5_common_mr.h> 26 27 #include "mlx5_defs.h" 28 #include "mlx5.h" 29 #include "mlx5_rx.h" 30 #include "mlx5_utils.h" 31 #include "mlx5_autoconf.h" 32 #include "mlx5_devx.h" 33 34 35 /* Default RSS hash key also used for ConnectX-3. */ 36 uint8_t rss_hash_default_key[] = { 37 0x2c, 0xc6, 0x81, 0xd1, 38 0x5b, 0xdb, 0xf4, 0xf7, 39 0xfc, 0xa2, 0x83, 0x19, 40 0xdb, 0x1a, 0x3e, 0x94, 41 0x6b, 0x9e, 0x38, 0xd9, 42 0x2c, 0x9c, 0x03, 0xd1, 43 0xad, 0x99, 0x44, 0xa7, 44 0xd9, 0x56, 0x3d, 0x59, 45 0x06, 0x3c, 0x25, 0xf3, 46 0xfc, 0x1f, 0xdc, 0x2a, 47 }; 48 49 /* Length of the default RSS hash key. */ 50 static_assert(MLX5_RSS_HASH_KEY_LEN == 51 (unsigned int)sizeof(rss_hash_default_key), 52 "wrong RSS default key size."); 53 54 /** 55 * Calculate the number of CQEs in CQ for the Rx queue. 56 * 57 * @param rxq_data 58 * Pointer to receive queue structure. 59 * 60 * @return 61 * Number of CQEs in CQ. 62 */ 63 unsigned int 64 mlx5_rxq_cqe_num(struct mlx5_rxq_data *rxq_data) 65 { 66 unsigned int cqe_n; 67 unsigned int wqe_n = 1 << rxq_data->elts_n; 68 69 if (mlx5_rxq_mprq_enabled(rxq_data)) 70 cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1; 71 else 72 cqe_n = wqe_n - 1; 73 return cqe_n; 74 } 75 76 /** 77 * Allocate RX queue elements for Multi-Packet RQ. 78 * 79 * @param rxq_ctrl 80 * Pointer to RX queue structure. 81 * 82 * @return 83 * 0 on success, a negative errno value otherwise and rte_errno is set. 84 */ 85 static int 86 rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) 87 { 88 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; 89 unsigned int wqe_n = 1 << rxq->elts_n; 90 unsigned int i; 91 int err; 92 93 /* Iterate on segments. */ 94 for (i = 0; i <= wqe_n; ++i) { 95 struct mlx5_mprq_buf *buf; 96 97 if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) { 98 DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id); 99 rte_errno = ENOMEM; 100 goto error; 101 } 102 if (i < wqe_n) 103 (*rxq->mprq_bufs)[i] = buf; 104 else 105 rxq->mprq_repl = buf; 106 } 107 DRV_LOG(DEBUG, 108 "port %u MPRQ queue %u allocated and configured %u segments", 109 rxq->port_id, rxq->idx, wqe_n); 110 return 0; 111 error: 112 err = rte_errno; /* Save rte_errno before cleanup. */ 113 wqe_n = i; 114 for (i = 0; (i != wqe_n); ++i) { 115 if ((*rxq->mprq_bufs)[i] != NULL) 116 rte_mempool_put(rxq->mprq_mp, 117 (*rxq->mprq_bufs)[i]); 118 (*rxq->mprq_bufs)[i] = NULL; 119 } 120 DRV_LOG(DEBUG, "port %u MPRQ queue %u failed, freed everything", 121 rxq->port_id, rxq->idx); 122 rte_errno = err; /* Restore rte_errno. */ 123 return -rte_errno; 124 } 125 126 /** 127 * Allocate RX queue elements for Single-Packet RQ. 128 * 129 * @param rxq_ctrl 130 * Pointer to RX queue structure. 131 * 132 * @return 133 * 0 on success, negative errno value on failure. 134 */ 135 static int 136 rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) 137 { 138 const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; 139 unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? 
140 (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) : 141 (1 << rxq_ctrl->rxq.elts_n); 142 unsigned int i; 143 int err; 144 145 /* Iterate on segments. */ 146 for (i = 0; (i != elts_n); ++i) { 147 struct mlx5_eth_rxseg *seg = &rxq_ctrl->rxq.rxseg[i % sges_n]; 148 struct rte_mbuf *buf; 149 150 buf = rte_pktmbuf_alloc(seg->mp); 151 if (buf == NULL) { 152 if (rxq_ctrl->share_group == 0) 153 DRV_LOG(ERR, "port %u queue %u empty mbuf pool", 154 RXQ_PORT_ID(rxq_ctrl), 155 rxq_ctrl->rxq.idx); 156 else 157 DRV_LOG(ERR, "share group %u queue %u empty mbuf pool", 158 rxq_ctrl->share_group, 159 rxq_ctrl->share_qid); 160 rte_errno = ENOMEM; 161 goto error; 162 } 163 /* Headroom is reserved by rte_pktmbuf_alloc(). */ 164 MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); 165 /* Buffer is supposed to be empty. */ 166 MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0); 167 MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0); 168 MLX5_ASSERT(!buf->next); 169 SET_DATA_OFF(buf, seg->offset); 170 PORT(buf) = rxq_ctrl->rxq.port_id; 171 DATA_LEN(buf) = seg->length; 172 PKT_LEN(buf) = seg->length; 173 NB_SEGS(buf) = 1; 174 (*rxq_ctrl->rxq.elts)[i] = buf; 175 } 176 /* If Rx vector is activated. */ 177 if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { 178 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; 179 struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; 180 struct rte_pktmbuf_pool_private *priv = 181 (struct rte_pktmbuf_pool_private *) 182 rte_mempool_get_priv(rxq_ctrl->rxq.mp); 183 int j; 184 185 /* Initialize default rearm_data for vPMD. */ 186 mbuf_init->data_off = RTE_PKTMBUF_HEADROOM; 187 rte_mbuf_refcnt_set(mbuf_init, 1); 188 mbuf_init->nb_segs = 1; 189 /* For shared queues port is provided in CQE */ 190 mbuf_init->port = rxq->shared ? 0 : rxq->port_id; 191 if (priv->flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) 192 mbuf_init->ol_flags = RTE_MBUF_F_EXTERNAL; 193 /* 194 * prevent compiler reordering: 195 * rearm_data covers previous fields. 196 */ 197 rte_compiler_barrier(); 198 rxq->mbuf_initializer = 199 *(rte_xmm_t *)&mbuf_init->rearm_data; 200 /* Padding with a fake mbuf for vectorized Rx. */ 201 for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) 202 (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; 203 } 204 if (rxq_ctrl->share_group == 0) 205 DRV_LOG(DEBUG, 206 "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)", 207 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n, 208 elts_n / (1 << rxq_ctrl->rxq.sges_n)); 209 else 210 DRV_LOG(DEBUG, 211 "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)", 212 rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n, 213 elts_n / (1 << rxq_ctrl->rxq.sges_n)); 214 return 0; 215 error: 216 err = rte_errno; /* Save rte_errno before cleanup. */ 217 elts_n = i; 218 for (i = 0; (i != elts_n); ++i) { 219 if ((*rxq_ctrl->rxq.elts)[i] != NULL) 220 rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); 221 (*rxq_ctrl->rxq.elts)[i] = NULL; 222 } 223 if (rxq_ctrl->share_group == 0) 224 DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything", 225 RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx); 226 else 227 DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything", 228 rxq_ctrl->share_group, rxq_ctrl->share_qid); 229 rte_errno = err; /* Restore rte_errno. */ 230 return -rte_errno; 231 } 232 233 /** 234 * Allocate RX queue elements. 235 * 236 * @param rxq_ctrl 237 * Pointer to RX queue structure. 238 * 239 * @return 240 * 0 on success, negative errno value on failure. 
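 *
 * Worked example (illustrative values only): with elts_n = 10 and
 * strd_num_n = 6, the MPRQ path allocates 2^10 + 1 mlx5_mprq_buf objects
 * (the extra one is kept aside as rxq->mprq_repl, the replacement buffer),
 * while the SPRQ path still allocates 2^10 * 2^6 mbufs so that every
 * stride has an mbuf slot available for the vector/memcpy delivery path.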
241 */ 242 int 243 rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) 244 { 245 int ret = 0; 246 247 /** 248 * For MPRQ we need to allocate both MPRQ buffers 249 * for WQEs and simple mbufs for vector processing. 250 */ 251 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) 252 ret = rxq_alloc_elts_mprq(rxq_ctrl); 253 if (ret == 0) 254 ret = rxq_alloc_elts_sprq(rxq_ctrl); 255 return ret; 256 } 257 258 /** 259 * Free RX queue elements for Multi-Packet RQ. 260 * 261 * @param rxq_ctrl 262 * Pointer to RX queue structure. 263 */ 264 static void 265 rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl) 266 { 267 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; 268 uint16_t i; 269 270 DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing %d WRs", 271 rxq->port_id, rxq->idx, (1u << rxq->elts_n)); 272 if (rxq->mprq_bufs == NULL) 273 return; 274 for (i = 0; (i != (1u << rxq->elts_n)); ++i) { 275 if ((*rxq->mprq_bufs)[i] != NULL) 276 mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]); 277 (*rxq->mprq_bufs)[i] = NULL; 278 } 279 if (rxq->mprq_repl != NULL) { 280 mlx5_mprq_buf_free(rxq->mprq_repl); 281 rxq->mprq_repl = NULL; 282 } 283 } 284 285 /** 286 * Free RX queue elements for Single-Packet RQ. 287 * 288 * @param rxq_ctrl 289 * Pointer to RX queue structure. 290 */ 291 static void 292 rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl) 293 { 294 struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; 295 const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? 296 (1 << rxq->elts_n) * (1 << rxq->strd_num_n) : 297 (1 << rxq->elts_n); 298 const uint16_t q_mask = q_n - 1; 299 uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ? 300 rxq->elts_ci : rxq->rq_ci; 301 uint16_t used = q_n - (elts_ci - rxq->rq_pi); 302 uint16_t i; 303 304 if (rxq_ctrl->share_group == 0) 305 DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs", 306 RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n); 307 else 308 DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs", 309 rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n); 310 if (rxq->elts == NULL) 311 return; 312 /** 313 * Some mbuf in the Ring belongs to the application. 314 * They cannot be freed. 315 */ 316 if (mlx5_rxq_check_vec_support(rxq) > 0) { 317 for (i = 0; i < used; ++i) 318 (*rxq->elts)[(elts_ci + i) & q_mask] = NULL; 319 rxq->rq_pi = elts_ci; 320 } 321 for (i = 0; i != q_n; ++i) { 322 if ((*rxq->elts)[i] != NULL) 323 rte_pktmbuf_free_seg((*rxq->elts)[i]); 324 (*rxq->elts)[i] = NULL; 325 } 326 } 327 328 /** 329 * Free RX queue elements. 330 * 331 * @param rxq_ctrl 332 * Pointer to RX queue structure. 333 */ 334 static void 335 rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) 336 { 337 /* 338 * For MPRQ we need to allocate both MPRQ buffers 339 * for WQEs and simple mbufs for vector processing. 340 */ 341 if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) 342 rxq_free_elts_mprq(rxq_ctrl); 343 rxq_free_elts_sprq(rxq_ctrl); 344 } 345 346 /** 347 * Returns the per-queue supported offloads. 348 * 349 * @param dev 350 * Pointer to Ethernet device. 351 * 352 * @return 353 * Supported Rx offloads. 
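 *
 * Note that the mask returned here only advertises per-queue capabilities;
 * the offloads actually applied to a queue are computed at setup time as
 *   conf->offloads | dev->data->dev_conf.rxmode.offloads
 * (see mlx5_rx_queue_setup() and mlx5_rxq_new()).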
354 */ 355 uint64_t 356 mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) 357 { 358 struct mlx5_priv *priv = dev->data->dev_private; 359 struct mlx5_dev_config *config = &priv->config; 360 uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER | 361 RTE_ETH_RX_OFFLOAD_TIMESTAMP | 362 RTE_ETH_RX_OFFLOAD_RSS_HASH); 363 364 if (!config->mprq.enabled) 365 offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT; 366 if (config->hw_fcs_strip) 367 offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC; 368 if (config->hw_csum) 369 offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 370 RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 371 RTE_ETH_RX_OFFLOAD_TCP_CKSUM); 372 if (config->hw_vlan_strip) 373 offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP; 374 if (MLX5_LRO_SUPPORTED(dev)) 375 offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO; 376 return offloads; 377 } 378 379 380 /** 381 * Returns the per-port supported offloads. 382 * 383 * @return 384 * Supported Rx offloads. 385 */ 386 uint64_t 387 mlx5_get_rx_port_offloads(void) 388 { 389 uint64_t offloads = RTE_ETH_RX_OFFLOAD_VLAN_FILTER; 390 391 return offloads; 392 } 393 394 /** 395 * Verify if the queue can be released. 396 * 397 * @param dev 398 * Pointer to Ethernet device. 399 * @param idx 400 * RX queue index. 401 * 402 * @return 403 * 1 if the queue can be released 404 * 0 if the queue can not be released, there are references to it. 405 * Negative errno and rte_errno is set if queue doesn't exist. 406 */ 407 static int 408 mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) 409 { 410 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); 411 412 if (rxq == NULL) { 413 rte_errno = EINVAL; 414 return -rte_errno; 415 } 416 return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1); 417 } 418 419 /* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */ 420 static void 421 rxq_sync_cq(struct mlx5_rxq_data *rxq) 422 { 423 const uint16_t cqe_n = 1 << rxq->cqe_n; 424 const uint16_t cqe_mask = cqe_n - 1; 425 volatile struct mlx5_cqe *cqe; 426 int ret, i; 427 428 i = cqe_n; 429 do { 430 cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_mask]; 431 ret = check_cqe(cqe, cqe_n, rxq->cq_ci); 432 if (ret == MLX5_CQE_STATUS_HW_OWN) 433 break; 434 if (ret == MLX5_CQE_STATUS_ERR) { 435 rxq->cq_ci++; 436 continue; 437 } 438 MLX5_ASSERT(ret == MLX5_CQE_STATUS_SW_OWN); 439 if (MLX5_CQE_FORMAT(cqe->op_own) != MLX5_COMPRESSED) { 440 rxq->cq_ci++; 441 continue; 442 } 443 /* Compute the next non compressed CQE. */ 444 rxq->cq_ci += rte_be_to_cpu_32(cqe->byte_cnt); 445 446 } while (--i); 447 /* Move all CQEs to HW ownership, including possible MiniCQEs. */ 448 for (i = 0; i < cqe_n; i++) { 449 cqe = &(*rxq->cqes)[i]; 450 cqe->op_own = MLX5_CQE_INVALIDATE; 451 } 452 /* Resync CQE and WQE (WQ in RESET state). */ 453 rte_io_wmb(); 454 *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci); 455 rte_io_wmb(); 456 *rxq->rq_db = rte_cpu_to_be_32(0); 457 rte_io_wmb(); 458 } 459 460 /** 461 * Rx queue stop. Device queue goes to the RESET state, 462 * all involved mbufs are freed from WQ. 463 * 464 * @param dev 465 * Pointer to Ethernet device structure. 466 * @param idx 467 * RX queue index. 468 * 469 * @return 470 * 0 on success, a negative errno value otherwise and rte_errno is set. 
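 *
 * The stop sequence below is, in order: move the WQ from READY to RESET
 * via obj_ops.rxq_obj_modify(), drain the CQ with rxq_sync_cq(), free all
 * queue elements, and finally mark the queue RTE_ETH_QUEUE_STATE_STOPPED.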
 */
int
mlx5_rx_queue_stop_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq_ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RDY2RST);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Rx WQ state to RESET: %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	/* Remove all pending CQEs. */
	rxq_sync_cq(&rxq_ctrl->rxq);
	/* Free all involved mbufs. */
	rxq_free_elts(rxq_ctrl);
	/* Set the actual queue state. */
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

/**
 * Rx queue stop. Device queue goes to the RESET state,
 * all involved mbufs are freed from WQ.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_stop(struct rte_eth_dev *dev, uint16_t idx)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	int ret;

	if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) {
		DRV_LOG(ERR, "Hairpin queue can't be stopped");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;
	/*
	 * Vectorized Rx burst requires the CQ and RQ indices to stay
	 * synchronized; an RQ restart may break this and cause Rx
	 * malfunction, so queue stopping is not supported when the
	 * vectorized Rx burst is engaged. The burst routine pointer
	 * depends on the process type, hence the check is performed here.
	 */
	if (pkt_burst == mlx5_rx_burst_vec) {
		DRV_LOG(ERR, "Rx queue stop is not supported "
			"for vectorized Rx");
		rte_errno = EINVAL;
		return -EINVAL;
	}
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mlx5_mp_os_req_queue_control(dev, idx,
						   MLX5_MP_REQ_QUEUE_RX_STOP);
	} else {
		ret = mlx5_rx_queue_stop_primary(dev, idx);
	}
	return ret;
}

/**
 * Rx queue start. Device queue goes to the ready state,
 * all required mbufs are allocated and WQ is replenished.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rx_queue_start_primary(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
	struct mlx5_rxq_data *rxq_data = &rxq->ctrl->rxq;
	int ret;

	MLX5_ASSERT(rxq != NULL && rxq->ctrl != NULL);
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	/* Allocate needed buffers. */
	ret = rxq_alloc_elts(rxq->ctrl);
	if (ret) {
		DRV_LOG(ERR, "Cannot reallocate buffers for Rx WQ");
		rte_errno = errno;
		return ret;
	}
	rte_io_wmb();
	*rxq_data->cq_db = rte_cpu_to_be_32(rxq_data->cq_ci);
	rte_io_wmb();
	/* Reset RQ consumer before moving queue to READY state.
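	 * Both doorbell writes are fenced with rte_io_wmb() so the device
	 * observes a consistent CQ/RQ doorbell state before the RST-to-RDY
	 * transition requested right below.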
*/ 579 *rxq_data->rq_db = rte_cpu_to_be_32(0); 580 rte_io_wmb(); 581 ret = priv->obj_ops.rxq_obj_modify(rxq, MLX5_RXQ_MOD_RST2RDY); 582 if (ret) { 583 DRV_LOG(ERR, "Cannot change Rx WQ state to READY: %s", 584 strerror(errno)); 585 rte_errno = errno; 586 return ret; 587 } 588 /* Reinitialize RQ - set WQEs. */ 589 mlx5_rxq_initialize(rxq_data); 590 rxq_data->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR; 591 /* Set actual queue state. */ 592 dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED; 593 return 0; 594 } 595 596 /** 597 * Rx queue start. Device queue goes to the ready state, 598 * all required mbufs are allocated and WQ is replenished. 599 * 600 * @param dev 601 * Pointer to Ethernet device structure. 602 * @param idx 603 * RX queue index. 604 * 605 * @return 606 * 0 on success, a negative errno value otherwise and rte_errno is set. 607 */ 608 int 609 mlx5_rx_queue_start(struct rte_eth_dev *dev, uint16_t idx) 610 { 611 int ret; 612 613 if (rte_eth_dev_is_rx_hairpin_queue(dev, idx)) { 614 DRV_LOG(ERR, "Hairpin queue can't be started"); 615 rte_errno = EINVAL; 616 return -EINVAL; 617 } 618 if (dev->data->rx_queue_state[idx] == RTE_ETH_QUEUE_STATE_STARTED) 619 return 0; 620 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 621 ret = mlx5_mp_os_req_queue_control(dev, idx, 622 MLX5_MP_REQ_QUEUE_RX_START); 623 } else { 624 ret = mlx5_rx_queue_start_primary(dev, idx); 625 } 626 return ret; 627 } 628 629 /** 630 * Rx queue presetup checks. 631 * 632 * @param dev 633 * Pointer to Ethernet device structure. 634 * @param idx 635 * RX queue index. 636 * @param desc 637 * Number of descriptors to configure in queue. 638 * @param[out] rxq_ctrl 639 * Address of pointer to shared Rx queue control. 640 * 641 * @return 642 * 0 on success, a negative errno value otherwise and rte_errno is set. 643 */ 644 static int 645 mlx5_rx_queue_pre_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t *desc, 646 struct mlx5_rxq_ctrl **rxq_ctrl) 647 { 648 struct mlx5_priv *priv = dev->data->dev_private; 649 struct mlx5_rxq_priv *rxq; 650 bool empty; 651 652 if (!rte_is_power_of_2(*desc)) { 653 *desc = 1 << log2above(*desc); 654 DRV_LOG(WARNING, 655 "port %u increased number of descriptors in Rx queue %u" 656 " to the next power of two (%d)", 657 dev->data->port_id, idx, *desc); 658 } 659 DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", 660 dev->data->port_id, idx, *desc); 661 if (idx >= priv->rxqs_n) { 662 DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", 663 dev->data->port_id, idx, priv->rxqs_n); 664 rte_errno = EOVERFLOW; 665 return -rte_errno; 666 } 667 if (rxq_ctrl == NULL || *rxq_ctrl == NULL) 668 return 0; 669 if (!(*rxq_ctrl)->rxq.shared) { 670 if (!mlx5_rxq_releasable(dev, idx)) { 671 DRV_LOG(ERR, "port %u unable to release queue index %u", 672 dev->data->port_id, idx); 673 rte_errno = EBUSY; 674 return -rte_errno; 675 } 676 mlx5_rxq_release(dev, idx); 677 } else { 678 if ((*rxq_ctrl)->obj != NULL) 679 /* Some port using shared Rx queue has been started. */ 680 return 0; 681 /* Release all owner RxQ to reconfigure Shared RxQ. */ 682 do { 683 rxq = LIST_FIRST(&(*rxq_ctrl)->owners); 684 LIST_REMOVE(rxq, owner_entry); 685 empty = LIST_EMPTY(&(*rxq_ctrl)->owners); 686 mlx5_rxq_release(ETH_DEV(rxq->priv), rxq->idx); 687 } while (!empty); 688 *rxq_ctrl = NULL; 689 } 690 return 0; 691 } 692 693 /** 694 * Get the shared Rx queue object that matches group and queue index. 695 * 696 * @param dev 697 * Pointer to Ethernet device structure. 
698 * @param group 699 * Shared RXQ group. 700 * @param share_qid 701 * Shared RX queue index. 702 * 703 * @return 704 * Shared RXQ object that matching, or NULL if not found. 705 */ 706 static struct mlx5_rxq_ctrl * 707 mlx5_shared_rxq_get(struct rte_eth_dev *dev, uint32_t group, uint16_t share_qid) 708 { 709 struct mlx5_rxq_ctrl *rxq_ctrl; 710 struct mlx5_priv *priv = dev->data->dev_private; 711 712 LIST_FOREACH(rxq_ctrl, &priv->sh->shared_rxqs, share_entry) { 713 if (rxq_ctrl->share_group == group && 714 rxq_ctrl->share_qid == share_qid) 715 return rxq_ctrl; 716 } 717 return NULL; 718 } 719 720 /** 721 * Check whether requested Rx queue configuration matches shared RXQ. 722 * 723 * @param rxq_ctrl 724 * Pointer to shared RXQ. 725 * @param dev 726 * Pointer to Ethernet device structure. 727 * @param idx 728 * Queue index. 729 * @param desc 730 * Number of descriptors to configure in queue. 731 * @param socket 732 * NUMA socket on which memory must be allocated. 733 * @param[in] conf 734 * Thresholds parameters. 735 * @param mp 736 * Memory pool for buffer allocations. 737 * 738 * @return 739 * 0 on success, a negative errno value otherwise and rte_errno is set. 740 */ 741 static bool 742 mlx5_shared_rxq_match(struct mlx5_rxq_ctrl *rxq_ctrl, struct rte_eth_dev *dev, 743 uint16_t idx, uint16_t desc, unsigned int socket, 744 const struct rte_eth_rxconf *conf, 745 struct rte_mempool *mp) 746 { 747 struct mlx5_priv *spriv = LIST_FIRST(&rxq_ctrl->owners)->priv; 748 struct mlx5_priv *priv = dev->data->dev_private; 749 unsigned int i; 750 751 RTE_SET_USED(conf); 752 if (rxq_ctrl->socket != socket) { 753 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: socket mismatch", 754 dev->data->port_id, idx); 755 return false; 756 } 757 if (rxq_ctrl->rxq.elts_n != log2above(desc)) { 758 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: descriptor number mismatch", 759 dev->data->port_id, idx); 760 return false; 761 } 762 if (priv->mtu != spriv->mtu) { 763 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mtu mismatch", 764 dev->data->port_id, idx); 765 return false; 766 } 767 if (priv->dev_data->dev_conf.intr_conf.rxq != 768 spriv->dev_data->dev_conf.intr_conf.rxq) { 769 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: interrupt mismatch", 770 dev->data->port_id, idx); 771 return false; 772 } 773 if (mp != NULL && rxq_ctrl->rxq.mp != mp) { 774 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: mempool mismatch", 775 dev->data->port_id, idx); 776 return false; 777 } else if (mp == NULL) { 778 if (conf->rx_nseg != rxq_ctrl->rxseg_n) { 779 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch", 780 dev->data->port_id, idx); 781 return false; 782 } 783 for (i = 0; i < conf->rx_nseg; i++) { 784 if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i], 785 sizeof(struct rte_eth_rxseg_split))) { 786 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch", 787 dev->data->port_id, idx, i); 788 return false; 789 } 790 } 791 } 792 if (priv->config.hw_padding != spriv->config.hw_padding) { 793 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: padding mismatch", 794 dev->data->port_id, idx); 795 return false; 796 } 797 if (priv->config.cqe_comp != spriv->config.cqe_comp || 798 (priv->config.cqe_comp && 799 priv->config.cqe_comp_fmt != spriv->config.cqe_comp_fmt)) { 800 DRV_LOG(ERR, "port %u queue index %u failed to join shared group: CQE 
compression mismatch", 801 dev->data->port_id, idx); 802 return false; 803 } 804 return true; 805 } 806 807 /** 808 * 809 * @param dev 810 * Pointer to Ethernet device structure. 811 * @param idx 812 * RX queue index. 813 * @param desc 814 * Number of descriptors to configure in queue. 815 * @param socket 816 * NUMA socket on which memory must be allocated. 817 * @param[in] conf 818 * Thresholds parameters. 819 * @param mp 820 * Memory pool for buffer allocations. 821 * 822 * @return 823 * 0 on success, a negative errno value otherwise and rte_errno is set. 824 */ 825 int 826 mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, 827 unsigned int socket, const struct rte_eth_rxconf *conf, 828 struct rte_mempool *mp) 829 { 830 struct mlx5_priv *priv = dev->data->dev_private; 831 struct mlx5_rxq_priv *rxq; 832 struct mlx5_rxq_ctrl *rxq_ctrl = NULL; 833 struct rte_eth_rxseg_split *rx_seg = 834 (struct rte_eth_rxseg_split *)conf->rx_seg; 835 struct rte_eth_rxseg_split rx_single = {.mp = mp}; 836 uint16_t n_seg = conf->rx_nseg; 837 int res; 838 uint64_t offloads = conf->offloads | 839 dev->data->dev_conf.rxmode.offloads; 840 841 if (mp) { 842 /* 843 * The parameters should be checked on rte_eth_dev layer. 844 * If mp is specified it means the compatible configuration 845 * without buffer split feature tuning. 846 */ 847 rx_seg = &rx_single; 848 n_seg = 1; 849 } 850 if (n_seg > 1) { 851 /* The offloads should be checked on rte_eth_dev layer. */ 852 MLX5_ASSERT(offloads & RTE_ETH_RX_OFFLOAD_SCATTER); 853 if (!(offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { 854 DRV_LOG(ERR, "port %u queue index %u split " 855 "offload not configured", 856 dev->data->port_id, idx); 857 rte_errno = ENOSPC; 858 return -rte_errno; 859 } 860 MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG); 861 } 862 if (conf->share_group > 0) { 863 if (!priv->config.hca_attr.mem_rq_rmp) { 864 DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw", 865 dev->data->port_id, idx); 866 rte_errno = EINVAL; 867 return -rte_errno; 868 } 869 if (priv->obj_ops.rxq_obj_new != devx_obj_ops.rxq_obj_new) { 870 DRV_LOG(ERR, "port %u queue index %u shared Rx queue needs DevX api", 871 dev->data->port_id, idx); 872 rte_errno = EINVAL; 873 return -rte_errno; 874 } 875 if (conf->share_qid >= priv->rxqs_n) { 876 DRV_LOG(ERR, "port %u shared Rx queue index %u > number of Rx queues %u", 877 dev->data->port_id, conf->share_qid, 878 priv->rxqs_n); 879 rte_errno = EINVAL; 880 return -rte_errno; 881 } 882 if (priv->config.mprq.enabled) { 883 DRV_LOG(ERR, "port %u shared Rx queue index %u: not supported when MPRQ enabled", 884 dev->data->port_id, conf->share_qid); 885 rte_errno = EINVAL; 886 return -rte_errno; 887 } 888 /* Try to reuse shared RXQ. */ 889 rxq_ctrl = mlx5_shared_rxq_get(dev, conf->share_group, 890 conf->share_qid); 891 if (rxq_ctrl != NULL && 892 !mlx5_shared_rxq_match(rxq_ctrl, dev, idx, desc, socket, 893 conf, mp)) { 894 rte_errno = EINVAL; 895 return -rte_errno; 896 } 897 } 898 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, &rxq_ctrl); 899 if (res) 900 return res; 901 /* Allocate RXQ. */ 902 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0, 903 SOCKET_ID_ANY); 904 if (!rxq) { 905 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data", 906 dev->data->port_id, idx); 907 rte_errno = ENOMEM; 908 return -rte_errno; 909 } 910 rxq->priv = priv; 911 rxq->idx = idx; 912 (*priv->rxq_privs)[idx] = rxq; 913 if (rxq_ctrl != NULL) { 914 /* Join owner list. 
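		 * A non-NULL rxq_ctrl at this point means an existing shared
		 * Rx queue control structure was matched above; this port only
		 * attaches as an additional owner instead of creating a new
		 * control structure via mlx5_rxq_new().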
*/ 915 LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry); 916 rxq->ctrl = rxq_ctrl; 917 } else { 918 rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, 919 n_seg); 920 if (rxq_ctrl == NULL) { 921 DRV_LOG(ERR, "port %u unable to allocate rx queue index %u", 922 dev->data->port_id, idx); 923 mlx5_free(rxq); 924 (*priv->rxq_privs)[idx] = NULL; 925 rte_errno = ENOMEM; 926 return -rte_errno; 927 } 928 } 929 mlx5_rxq_ref(dev, idx); 930 DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", 931 dev->data->port_id, idx); 932 dev->data->rx_queues[idx] = &rxq_ctrl->rxq; 933 return 0; 934 } 935 936 /** 937 * 938 * @param dev 939 * Pointer to Ethernet device structure. 940 * @param idx 941 * RX queue index. 942 * @param desc 943 * Number of descriptors to configure in queue. 944 * @param hairpin_conf 945 * Hairpin configuration parameters. 946 * 947 * @return 948 * 0 on success, a negative errno value otherwise and rte_errno is set. 949 */ 950 int 951 mlx5_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t idx, 952 uint16_t desc, 953 const struct rte_eth_hairpin_conf *hairpin_conf) 954 { 955 struct mlx5_priv *priv = dev->data->dev_private; 956 struct mlx5_rxq_priv *rxq; 957 struct mlx5_rxq_ctrl *rxq_ctrl; 958 int res; 959 960 res = mlx5_rx_queue_pre_setup(dev, idx, &desc, NULL); 961 if (res) 962 return res; 963 if (hairpin_conf->peer_count != 1) { 964 rte_errno = EINVAL; 965 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue index %u" 966 " peer count is %u", dev->data->port_id, 967 idx, hairpin_conf->peer_count); 968 return -rte_errno; 969 } 970 if (hairpin_conf->peers[0].port == dev->data->port_id) { 971 if (hairpin_conf->peers[0].queue >= priv->txqs_n) { 972 rte_errno = EINVAL; 973 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue" 974 " index %u, Tx %u is larger than %u", 975 dev->data->port_id, idx, 976 hairpin_conf->peers[0].queue, priv->txqs_n); 977 return -rte_errno; 978 } 979 } else { 980 if (hairpin_conf->manual_bind == 0 || 981 hairpin_conf->tx_explicit == 0) { 982 rte_errno = EINVAL; 983 DRV_LOG(ERR, "port %u unable to setup Rx hairpin queue" 984 " index %u peer port %u with attributes %u %u", 985 dev->data->port_id, idx, 986 hairpin_conf->peers[0].port, 987 hairpin_conf->manual_bind, 988 hairpin_conf->tx_explicit); 989 return -rte_errno; 990 } 991 } 992 rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0, 993 SOCKET_ID_ANY); 994 if (!rxq) { 995 DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data", 996 dev->data->port_id, idx); 997 rte_errno = ENOMEM; 998 return -rte_errno; 999 } 1000 rxq->priv = priv; 1001 rxq->idx = idx; 1002 (*priv->rxq_privs)[idx] = rxq; 1003 rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf); 1004 if (!rxq_ctrl) { 1005 DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u", 1006 dev->data->port_id, idx); 1007 mlx5_free(rxq); 1008 (*priv->rxq_privs)[idx] = NULL; 1009 rte_errno = ENOMEM; 1010 return -rte_errno; 1011 } 1012 DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list", 1013 dev->data->port_id, idx); 1014 dev->data->rx_queues[idx] = &rxq_ctrl->rxq; 1015 return 0; 1016 } 1017 1018 /** 1019 * DPDK callback to release a RX queue. 1020 * 1021 * @param dev 1022 * Pointer to Ethernet device structure. 1023 * @param qid 1024 * Receive queue index. 
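 *
 * Releasing a queue that is still referenced (e.g. by a flow rule) is
 * treated as fatal: mlx5_rxq_releasable() is checked first and rte_panic()
 * is invoked if references remain.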
1025 */ 1026 void 1027 mlx5_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 1028 { 1029 struct mlx5_rxq_data *rxq = dev->data->rx_queues[qid]; 1030 1031 if (rxq == NULL) 1032 return; 1033 if (!mlx5_rxq_releasable(dev, qid)) 1034 rte_panic("port %u Rx queue %u is still used by a flow and" 1035 " cannot be removed\n", dev->data->port_id, qid); 1036 mlx5_rxq_release(dev, qid); 1037 } 1038 1039 /** 1040 * Allocate queue vector and fill epoll fd list for Rx interrupts. 1041 * 1042 * @param dev 1043 * Pointer to Ethernet device. 1044 * 1045 * @return 1046 * 0 on success, a negative errno value otherwise and rte_errno is set. 1047 */ 1048 int 1049 mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) 1050 { 1051 struct mlx5_priv *priv = dev->data->dev_private; 1052 unsigned int i; 1053 unsigned int rxqs_n = priv->rxqs_n; 1054 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); 1055 unsigned int count = 0; 1056 struct rte_intr_handle *intr_handle = dev->intr_handle; 1057 1058 if (!dev->data->dev_conf.intr_conf.rxq) 1059 return 0; 1060 mlx5_rx_intr_vec_disable(dev); 1061 if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) { 1062 DRV_LOG(ERR, 1063 "port %u failed to allocate memory for interrupt" 1064 " vector, Rx interrupts will not be supported", 1065 dev->data->port_id); 1066 rte_errno = ENOMEM; 1067 return -rte_errno; 1068 } 1069 1070 if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT)) 1071 return -rte_errno; 1072 1073 for (i = 0; i != n; ++i) { 1074 /* This rxq obj must not be released in this function. */ 1075 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i); 1076 struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL; 1077 int rc; 1078 1079 /* Skip queues that cannot request interrupts. */ 1080 if (!rxq_obj || (!rxq_obj->ibv_channel && 1081 !rxq_obj->devx_channel)) { 1082 /* Use invalid intr_vec[] index to disable entry. */ 1083 if (rte_intr_vec_list_index_set(intr_handle, i, 1084 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)) 1085 return -rte_errno; 1086 continue; 1087 } 1088 mlx5_rxq_ref(dev, i); 1089 if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { 1090 DRV_LOG(ERR, 1091 "port %u too many Rx queues for interrupt" 1092 " vector size (%d), Rx interrupts cannot be" 1093 " enabled", 1094 dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID); 1095 mlx5_rx_intr_vec_disable(dev); 1096 rte_errno = ENOMEM; 1097 return -rte_errno; 1098 } 1099 rc = mlx5_os_set_nonblock_channel_fd(rxq_obj->fd); 1100 if (rc < 0) { 1101 rte_errno = errno; 1102 DRV_LOG(ERR, 1103 "port %u failed to make Rx interrupt file" 1104 " descriptor %d non-blocking for queue index" 1105 " %d", 1106 dev->data->port_id, rxq_obj->fd, i); 1107 mlx5_rx_intr_vec_disable(dev); 1108 return -rte_errno; 1109 } 1110 1111 if (rte_intr_vec_list_index_set(intr_handle, i, 1112 RTE_INTR_VEC_RXTX_OFFSET + count)) 1113 return -rte_errno; 1114 if (rte_intr_efds_index_set(intr_handle, count, 1115 rxq_obj->fd)) 1116 return -rte_errno; 1117 count++; 1118 } 1119 if (!count) 1120 mlx5_rx_intr_vec_disable(dev); 1121 else if (rte_intr_nb_efd_set(intr_handle, count)) 1122 return -rte_errno; 1123 return 0; 1124 } 1125 1126 /** 1127 * Clean up Rx interrupts handler. 1128 * 1129 * @param dev 1130 * Pointer to Ethernet device. 
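 *
 * This drops the per-queue references taken in mlx5_rx_intr_vec_enable(),
 * then releases the epoll file descriptors and the interrupt vector list.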
1131 */ 1132 void 1133 mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) 1134 { 1135 struct mlx5_priv *priv = dev->data->dev_private; 1136 struct rte_intr_handle *intr_handle = dev->intr_handle; 1137 unsigned int i; 1138 unsigned int rxqs_n = priv->rxqs_n; 1139 unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); 1140 1141 if (!dev->data->dev_conf.intr_conf.rxq) 1142 return; 1143 if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) 1144 goto free; 1145 for (i = 0; i != n; ++i) { 1146 if (rte_intr_vec_list_index_get(intr_handle, i) == 1147 RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID) 1148 continue; 1149 /** 1150 * Need to access directly the queue to release the reference 1151 * kept in mlx5_rx_intr_vec_enable(). 1152 */ 1153 mlx5_rxq_deref(dev, i); 1154 } 1155 free: 1156 rte_intr_free_epoll_fd(intr_handle); 1157 1158 rte_intr_vec_list_free(intr_handle); 1159 1160 rte_intr_nb_efd_set(intr_handle, 0); 1161 } 1162 1163 /** 1164 * MLX5 CQ notification . 1165 * 1166 * @param rxq 1167 * Pointer to receive queue structure. 1168 * @param sq_n_rxq 1169 * Sequence number per receive queue . 1170 */ 1171 static inline void 1172 mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) 1173 { 1174 int sq_n = 0; 1175 uint32_t doorbell_hi; 1176 uint64_t doorbell; 1177 1178 sq_n = sq_n_rxq & MLX5_CQ_SQN_MASK; 1179 doorbell_hi = sq_n << MLX5_CQ_SQN_OFFSET | (rxq->cq_ci & MLX5_CI_MASK); 1180 doorbell = (uint64_t)doorbell_hi << 32; 1181 doorbell |= rxq->cqn; 1182 mlx5_doorbell_ring(&rxq->uar_data, rte_cpu_to_be_64(doorbell), 1183 doorbell_hi, &rxq->cq_db[MLX5_CQ_ARM_DB], 0); 1184 } 1185 1186 /** 1187 * DPDK callback for Rx queue interrupt enable. 1188 * 1189 * @param dev 1190 * Pointer to Ethernet device structure. 1191 * @param rx_queue_id 1192 * Rx queue number. 1193 * 1194 * @return 1195 * 0 on success, a negative errno value otherwise and rte_errno is set. 1196 */ 1197 int 1198 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1199 { 1200 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id); 1201 if (!rxq) 1202 goto error; 1203 if (rxq->ctrl->irq) { 1204 if (!rxq->ctrl->obj) 1205 goto error; 1206 mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn); 1207 } 1208 return 0; 1209 error: 1210 rte_errno = EINVAL; 1211 return -rte_errno; 1212 } 1213 1214 /** 1215 * DPDK callback for Rx queue interrupt disable. 1216 * 1217 * @param dev 1218 * Pointer to Ethernet device structure. 1219 * @param rx_queue_id 1220 * Rx queue number. 1221 * 1222 * @return 1223 * 0 on success, a negative errno value otherwise and rte_errno is set. 1224 */ 1225 int 1226 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) 1227 { 1228 struct mlx5_priv *priv = dev->data->dev_private; 1229 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id); 1230 int ret = 0; 1231 1232 if (!rxq) { 1233 rte_errno = EINVAL; 1234 return -rte_errno; 1235 } 1236 if (!rxq->ctrl->obj) 1237 goto error; 1238 if (rxq->ctrl->irq) { 1239 ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj); 1240 if (ret < 0) 1241 goto error; 1242 rxq->ctrl->rxq.cq_arm_sn++; 1243 } 1244 return 0; 1245 error: 1246 /** 1247 * The ret variable may be EAGAIN which means the get_event function was 1248 * called before receiving one. 
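	 * In that case errno is propagated to rte_errno and, since EAGAIN is
	 * expected there, the warning below is suppressed for it.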
 */
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	if (rte_errno != EAGAIN)
		DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
			dev->data->port_id, rx_queue_id);
	return -rte_errno;
}

/**
 * Verify that the Rx queue objects list is empty.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   The number of objects not released.
 */
int
mlx5_rxq_obj_verify(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_rxq_obj *rxq_obj;

	LIST_FOREACH(rxq_obj, &priv->rxqsobj, next) {
		if (rxq_obj->rxq_ctrl == NULL)
			continue;
		if (rxq_obj->rxq_ctrl->rxq.shared &&
		    !LIST_EMPTY(&rxq_obj->rxq_ctrl->owners))
			continue;
		DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
			dev->data->port_id, rxq_obj->rxq_ctrl->rxq.idx);
		++ret;
	}
	return ret;
}

/**
 * Callback function to initialize mbufs for Multi-Packet RQ.
 */
static inline void
mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
		   void *_m, unsigned int i __rte_unused)
{
	struct mlx5_mprq_buf *buf = _m;
	struct rte_mbuf_ext_shared_info *shinfo;
	unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
	unsigned int j;

	memset(_m, 0, sizeof(*buf));
	buf->mp = mp;
	__atomic_store_n(&buf->refcnt, 1, __ATOMIC_RELAXED);
	for (j = 0; j != strd_n; ++j) {
		shinfo = &buf->shinfos[j];
		shinfo->free_cb = mlx5_mprq_buf_free_cb;
		shinfo->fcb_opaque = buf;
	}
}

/**
 * Free mempool of Multi-Packet RQ.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_mprq_free_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	unsigned int i;

	if (mp == NULL)
		return 0;
	DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
		dev->data->port_id, mp->name);
	/*
	 * If a buffer in the pool has been externally attached to an mbuf and
	 * is still in use by the application, destroying the Rx queue can
	 * corrupt the packet. It is unlikely, but it can happen if the
	 * application dynamically creates and destroys queues while holding
	 * Rx packets.
	 *
	 * TODO: It is unavoidable for now because the mempool for Multi-Packet
	 * RQ isn't provided by the application but managed by the PMD.
	 */
	if (!rte_mempool_full(mp)) {
		DRV_LOG(ERR,
			"port %u mempool for Multi-Packet RQ is still in use",
			dev->data->port_id);
		rte_errno = EBUSY;
		return -rte_errno;
	}
	rte_mempool_free(mp);
	/* Unset mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_data *rxq = mlx5_rxq_data_get(dev, i);

		if (rxq == NULL)
			continue;
		rxq->mprq_mp = NULL;
	}
	priv->mprq_mp = NULL;
	return 0;
}

/**
 * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
 * mempool. If already allocated, reuse it if it has enough elements;
 * otherwise, resize it.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, negative errno value on failure.
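 *
 * Sizing sketch (illustrative values, not the only possible configuration):
 * with strd_num_n = 6 and strd_sz_n = 11 each buffer carries
 * 2^6 * 2^11 = 128 KiB of stride space, so
 *   obj_size = sizeof(struct mlx5_mprq_buf) + 128 KiB +
 *              2^6 * sizeof(struct rte_mbuf_ext_shared_info) +
 *              RTE_PKTMBUF_HEADROOM;
 * and the object count is roughly 4 * total_desc plus a cache reserve of
 * MLX5_MPRQ_MP_CACHE_SZ per queue, as computed in the body below.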
 */
int
mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_mempool *mp = priv->mprq_mp;
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int desc = 0;
	unsigned int buf_len;
	unsigned int obj_num;
	unsigned int obj_size;
	unsigned int strd_num_n = 0;
	unsigned int strd_sz_n = 0;
	unsigned int i;
	unsigned int n_ibv = 0;
	int ret;

	if (!mlx5_mprq_enabled(dev))
		return 0;
	/* Count the total number of descriptors configured. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);
		struct mlx5_rxq_data *rxq;

		if (rxq_ctrl == NULL ||
		    rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		rxq = &rxq_ctrl->rxq;
		n_ibv++;
		desc += 1 << rxq->elts_n;
		/* Get the max number of strides. */
		if (strd_num_n < rxq->strd_num_n)
			strd_num_n = rxq->strd_num_n;
		/* Get the max size of a stride. */
		if (strd_sz_n < rxq->strd_sz_n)
			strd_sz_n = rxq->strd_sz_n;
	}
	MLX5_ASSERT(strd_num_n && strd_sz_n);
	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
	/*
	 * Received packets can be either memcpy'd or externally referenced.
	 * In case the packet is attached to an mbuf as an external buffer, as
	 * it isn't possible to predict how the buffers will be queued by the
	 * application, there is no way to exactly pre-allocate the needed
	 * buffers in advance but to speculatively prepare enough buffers.
	 *
	 * In the data path, if this mempool is depleted, the PMD will memcpy
	 * received packets into buffers provided by the application (rxq->mp)
	 * until this mempool becomes available again.
	 */
	desc *= 4;
	obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * n_ibv;
	/*
	 * rte_mempool_create_empty() has a sanity check that refuses a large
	 * cache size compared to the number of elements.
	 * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so using a
	 * constant number 2 instead.
	 */
	obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
	/* Check whether a mempool is already allocated and if it can be reused. */
	if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
		DRV_LOG(DEBUG, "port %u mempool %s is being reused",
			dev->data->port_id, mp->name);
		/* Reuse. */
		goto exit;
	} else if (mp != NULL) {
		DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
			dev->data->port_id, mp->name);
		/*
		 * If freeing fails, the mempool may still be in use and there
		 * is no choice but to keep using the existing one. On buffer
		 * underrun, packets will be memcpy'd instead of attached as
		 * external buffers.
		 */
		if (mlx5_mprq_free_mp(dev)) {
			if (mp->elt_size >= obj_size)
				goto exit;
			else
				return -rte_errno;
		}
	}
	snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
	mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
				0, NULL, NULL, mlx5_mprq_buf_init,
				(void *)((uintptr_t)1 << strd_num_n),
				dev->device->numa_node, 0);
	if (mp == NULL) {
		DRV_LOG(ERR,
			"port %u failed to allocate a mempool for"
			" Multi-Packet RQ, count=%u, size=%u",
			dev->data->port_id, obj_num, obj_size);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ret = mlx5_mr_mempool_register(priv->sh->cdev, mp, false);
	if (ret < 0 && rte_errno != EEXIST) {
		ret = rte_errno;
		DRV_LOG(ERR, "port %u failed to register a mempool for Multi-Packet RQ",
			dev->data->port_id);
		rte_mempool_free(mp);
		rte_errno = ret;
		return -rte_errno;
	}
	priv->mprq_mp = mp;
exit:
	/* Set mempool for each Rx queue. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, i);

		if (rxq_ctrl == NULL ||
		    rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
			continue;
		rxq_ctrl->rxq.mprq_mp = mp;
	}
	DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
		dev->data->port_id);
	return 0;
}

#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
				sizeof(struct rte_vlan_hdr) * 2 + \
				sizeof(struct rte_ipv6_hdr)))
#define MAX_TCP_OPTION_SIZE 40u
#define MLX5_MAX_LRO_HEADER_FIX ((unsigned int)(MLX5_MAX_TCP_HDR_OFFSET + \
				 sizeof(struct rte_tcp_hdr) + \
				 MAX_TCP_OPTION_SIZE))

/**
 * Adjust the maximum LRO message size.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   RX queue index.
 * @param max_lro_size
 *   The maximum size of an LRO packet.
 */
static void
mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint16_t idx,
			     uint32_t max_lro_size)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (priv->config.hca_attr.lro_max_msg_sz_mode ==
	    MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
	    MLX5_MAX_TCP_HDR_OFFSET)
		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
	if (priv->max_lro_msg_size)
		priv->max_lro_msg_size =
			RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
	else
		priv->max_lro_msg_size = max_lro_size;
	DRV_LOG(DEBUG,
		"port %u Rx Queue %u max LRO message size adjusted to %u bytes",
		dev->data->port_id, idx,
		priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}

/**
 * Create a DPDK Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rxq
 *   RX queue private data.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param rx_seg
 *   Rx segment descriptions for buffer split.
 * @param n_seg
 *   Number of Rx segment descriptions.
 *
 * @return
 *   A DPDK queue object on success, NULL otherwise and rte_errno is set.
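 *
 * Rough decision flow implemented below: if MPRQ is enabled and there are
 * more descriptors than strides per WQE (and a stride can hold the maximum
 * packet), the queue is configured as Multi-Packet RQ; otherwise, if a
 * single segment fits max_rx_pktlen, a plain single-segment RQ is used;
 * otherwise Rx scatter is configured with the SGE count rounded up to the
 * next power of two.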
1547 */ 1548 struct mlx5_rxq_ctrl * 1549 mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, 1550 uint16_t desc, 1551 unsigned int socket, const struct rte_eth_rxconf *conf, 1552 const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg) 1553 { 1554 uint16_t idx = rxq->idx; 1555 struct mlx5_priv *priv = dev->data->dev_private; 1556 struct mlx5_rxq_ctrl *tmpl; 1557 unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp); 1558 struct mlx5_dev_config *config = &priv->config; 1559 uint64_t offloads = conf->offloads | 1560 dev->data->dev_conf.rxmode.offloads; 1561 unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO); 1562 unsigned int max_rx_pktlen = lro_on_queue ? 1563 dev->data->dev_conf.rxmode.max_lro_pkt_size : 1564 dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN + 1565 RTE_ETHER_CRC_LEN; 1566 unsigned int non_scatter_min_mbuf_size = max_rx_pktlen + 1567 RTE_PKTMBUF_HEADROOM; 1568 unsigned int max_lro_size = 0; 1569 unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM; 1570 const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 && 1571 !rx_seg[0].offset && !rx_seg[0].length; 1572 unsigned int mprq_stride_nums = config->mprq.stride_num_n ? 1573 config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N; 1574 unsigned int mprq_stride_size = non_scatter_min_mbuf_size <= 1575 (1U << config->mprq.max_stride_size_n) ? 1576 log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N; 1577 unsigned int mprq_stride_cap = (config->mprq.stride_num_n ? 1578 (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) * 1579 (config->mprq.stride_size_n ? 1580 (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size)); 1581 /* 1582 * Always allocate extra slots, even if eventually 1583 * the vector Rx will not be used. 1584 */ 1585 uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; 1586 const struct rte_eth_rxseg_split *qs_seg = rx_seg; 1587 unsigned int tail_len; 1588 1589 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, 1590 sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) + 1591 (!!mprq_en) * 1592 (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *), 1593 0, socket); 1594 if (!tmpl) { 1595 rte_errno = ENOMEM; 1596 return NULL; 1597 } 1598 LIST_INIT(&tmpl->owners); 1599 if (conf->share_group > 0) { 1600 tmpl->rxq.shared = 1; 1601 tmpl->share_group = conf->share_group; 1602 tmpl->share_qid = conf->share_qid; 1603 LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry); 1604 } 1605 rxq->ctrl = tmpl; 1606 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); 1607 MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG); 1608 /* 1609 * Save the original segment configuration in the shared queue 1610 * descriptor for the later check on the sibling queue creation. 1611 */ 1612 tmpl->rxseg_n = n_seg; 1613 rte_memcpy(tmpl->rxseg, qs_seg, 1614 sizeof(struct rte_eth_rxseg_split) * n_seg); 1615 /* 1616 * Build the array of actual buffer offsets and lengths. 1617 * Pad with the buffers from the last memory pool if 1618 * needed to handle max size packets, replace zero length 1619 * with the buffer length from the pool. 1620 */ 1621 tail_len = max_rx_pktlen; 1622 do { 1623 struct mlx5_eth_rxseg *hw_seg = 1624 &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n]; 1625 uint32_t buf_len, offset, seg_len; 1626 1627 /* 1628 * For the buffers beyond descriptions offset is zero, 1629 * the first buffer contains head room. 1630 */ 1631 buf_len = rte_pktmbuf_data_room_size(qs_seg->mp); 1632 offset = (tmpl->rxq.rxseg_n >= n_seg ? 
0 : qs_seg->offset) + 1633 (tmpl->rxq.rxseg_n ? 0 : RTE_PKTMBUF_HEADROOM); 1634 /* 1635 * For the buffers beyond descriptions the length is 1636 * pool buffer length, zero lengths are replaced with 1637 * pool buffer length either. 1638 */ 1639 seg_len = tmpl->rxq.rxseg_n >= n_seg ? buf_len : 1640 qs_seg->length ? 1641 qs_seg->length : 1642 (buf_len - offset); 1643 /* Check is done in long int, now overflows. */ 1644 if (buf_len < seg_len + offset) { 1645 DRV_LOG(ERR, "port %u Rx queue %u: Split offset/length " 1646 "%u/%u can't be satisfied", 1647 dev->data->port_id, idx, 1648 qs_seg->length, qs_seg->offset); 1649 rte_errno = EINVAL; 1650 goto error; 1651 } 1652 if (seg_len > tail_len) 1653 seg_len = buf_len - offset; 1654 if (++tmpl->rxq.rxseg_n > MLX5_MAX_RXQ_NSEG) { 1655 DRV_LOG(ERR, 1656 "port %u too many SGEs (%u) needed to handle" 1657 " requested maximum packet size %u, the maximum" 1658 " supported are %u", dev->data->port_id, 1659 tmpl->rxq.rxseg_n, max_rx_pktlen, 1660 MLX5_MAX_RXQ_NSEG); 1661 rte_errno = ENOTSUP; 1662 goto error; 1663 } 1664 /* Build the actual scattering element in the queue object. */ 1665 hw_seg->mp = qs_seg->mp; 1666 MLX5_ASSERT(offset <= UINT16_MAX); 1667 MLX5_ASSERT(seg_len <= UINT16_MAX); 1668 hw_seg->offset = (uint16_t)offset; 1669 hw_seg->length = (uint16_t)seg_len; 1670 /* 1671 * Advance the segment descriptor, the padding is the based 1672 * on the attributes of the last descriptor. 1673 */ 1674 if (tmpl->rxq.rxseg_n < n_seg) 1675 qs_seg++; 1676 tail_len -= RTE_MIN(tail_len, seg_len); 1677 } while (tail_len || !rte_is_power_of_2(tmpl->rxq.rxseg_n)); 1678 MLX5_ASSERT(tmpl->rxq.rxseg_n && 1679 tmpl->rxq.rxseg_n <= MLX5_MAX_RXQ_NSEG); 1680 if (tmpl->rxq.rxseg_n > 1 && !(offloads & RTE_ETH_RX_OFFLOAD_SCATTER)) { 1681 DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not" 1682 " configured and no enough mbuf space(%u) to contain " 1683 "the maximum RX packet length(%u) with head-room(%u)", 1684 dev->data->port_id, idx, mb_len, max_rx_pktlen, 1685 RTE_PKTMBUF_HEADROOM); 1686 rte_errno = ENOSPC; 1687 goto error; 1688 } 1689 tmpl->type = MLX5_RXQ_TYPE_STANDARD; 1690 if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, 1691 &priv->sh->cdev->mr_scache.dev_gen, socket)) { 1692 /* rte_errno is already set. */ 1693 goto error; 1694 } 1695 tmpl->socket = socket; 1696 if (dev->data->dev_conf.intr_conf.rxq) 1697 tmpl->irq = 1; 1698 /* 1699 * This Rx queue can be configured as a Multi-Packet RQ if all of the 1700 * following conditions are met: 1701 * - MPRQ is enabled. 1702 * - The number of descs is more than the number of strides. 1703 * - max_rx_pktlen plus overhead is less than the max size 1704 * of a stride or mprq_stride_size is specified by a user. 1705 * Need to make sure that there are enough strides to encap 1706 * the maximum packet size in case mprq_stride_size is set. 1707 * Otherwise, enable Rx scatter if necessary. 1708 */ 1709 if (mprq_en && desc > (1U << mprq_stride_nums) && 1710 (non_scatter_min_mbuf_size <= 1711 (1U << config->mprq.max_stride_size_n) || 1712 (config->mprq.stride_size_n && 1713 non_scatter_min_mbuf_size <= mprq_stride_cap))) { 1714 /* TODO: Rx scatter isn't supported yet. */ 1715 tmpl->rxq.sges_n = 0; 1716 /* Trim the number of descs needed. */ 1717 desc >>= mprq_stride_nums; 1718 tmpl->rxq.strd_num_n = config->mprq.stride_num_n ? 1719 config->mprq.stride_num_n : mprq_stride_nums; 1720 tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ? 
1721 config->mprq.stride_size_n : mprq_stride_size; 1722 tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT; 1723 tmpl->rxq.strd_scatter_en = 1724 !!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER); 1725 tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size, 1726 config->mprq.max_memcpy_len); 1727 max_lro_size = RTE_MIN(max_rx_pktlen, 1728 (1u << tmpl->rxq.strd_num_n) * 1729 (1u << tmpl->rxq.strd_sz_n)); 1730 DRV_LOG(DEBUG, 1731 "port %u Rx queue %u: Multi-Packet RQ is enabled" 1732 " strd_num_n = %u, strd_sz_n = %u", 1733 dev->data->port_id, idx, 1734 tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n); 1735 } else if (tmpl->rxq.rxseg_n == 1) { 1736 MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size); 1737 tmpl->rxq.sges_n = 0; 1738 max_lro_size = max_rx_pktlen; 1739 } else if (offloads & RTE_ETH_RX_OFFLOAD_SCATTER) { 1740 unsigned int sges_n; 1741 1742 if (lro_on_queue && first_mb_free_size < 1743 MLX5_MAX_LRO_HEADER_FIX) { 1744 DRV_LOG(ERR, "Not enough space in the first segment(%u)" 1745 " to include the max header size(%u) for LRO", 1746 first_mb_free_size, MLX5_MAX_LRO_HEADER_FIX); 1747 rte_errno = ENOTSUP; 1748 goto error; 1749 } 1750 /* 1751 * Determine the number of SGEs needed for a full packet 1752 * and round it to the next power of two. 1753 */ 1754 sges_n = log2above(tmpl->rxq.rxseg_n); 1755 if (sges_n > MLX5_MAX_LOG_RQ_SEGS) { 1756 DRV_LOG(ERR, 1757 "port %u too many SGEs (%u) needed to handle" 1758 " requested maximum packet size %u, the maximum" 1759 " supported are %u", dev->data->port_id, 1760 1 << sges_n, max_rx_pktlen, 1761 1u << MLX5_MAX_LOG_RQ_SEGS); 1762 rte_errno = ENOTSUP; 1763 goto error; 1764 } 1765 tmpl->rxq.sges_n = sges_n; 1766 max_lro_size = max_rx_pktlen; 1767 } 1768 if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq)) 1769 DRV_LOG(WARNING, 1770 "port %u MPRQ is requested but cannot be enabled\n" 1771 " (requested: pkt_sz = %u, desc_num = %u," 1772 " rxq_num = %u, stride_sz = %u, stride_num = %u\n" 1773 " supported: min_rxqs_num = %u," 1774 " min_stride_sz = %u, max_stride_sz = %u).", 1775 dev->data->port_id, non_scatter_min_mbuf_size, 1776 desc, priv->rxqs_n, 1777 config->mprq.stride_size_n ? 1778 (1U << config->mprq.stride_size_n) : 1779 (1U << mprq_stride_size), 1780 config->mprq.stride_num_n ? 1781 (1U << config->mprq.stride_num_n) : 1782 (1U << mprq_stride_nums), 1783 config->mprq.min_rxqs_num, 1784 (1U << config->mprq.min_stride_size_n), 1785 (1U << config->mprq.max_stride_size_n)); 1786 DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", 1787 dev->data->port_id, 1 << tmpl->rxq.sges_n); 1788 if (desc % (1 << tmpl->rxq.sges_n)) { 1789 DRV_LOG(ERR, 1790 "port %u number of Rx queue descriptors (%u) is not a" 1791 " multiple of SGEs per packet (%u)", 1792 dev->data->port_id, 1793 desc, 1794 1 << tmpl->rxq.sges_n); 1795 rte_errno = EINVAL; 1796 goto error; 1797 } 1798 mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size); 1799 /* Toggle RX checksum offload if hardware supports it. */ 1800 tmpl->rxq.csum = !!(offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM); 1801 /* Configure Rx timestamp. */ 1802 tmpl->rxq.hw_timestamp = !!(offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP); 1803 tmpl->rxq.timestamp_rx_flag = 0; 1804 if (tmpl->rxq.hw_timestamp && rte_mbuf_dyn_rx_timestamp_register( 1805 &tmpl->rxq.timestamp_offset, 1806 &tmpl->rxq.timestamp_rx_flag) != 0) { 1807 DRV_LOG(ERR, "Cannot register Rx timestamp field/flag"); 1808 goto error; 1809 } 1810 /* Configure VLAN stripping. 
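	 * Presumably the flag recorded here is consumed by the Rx burst
	 * routines when converting CQEs to mbufs; at this point it only
	 * reflects whether RTE_ETH_RX_OFFLOAD_VLAN_STRIP was requested.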
*/ 1811 tmpl->rxq.vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP); 1812 /* By default, FCS (CRC) is stripped by hardware. */ 1813 tmpl->rxq.crc_present = 0; 1814 tmpl->rxq.lro = lro_on_queue; 1815 if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) { 1816 if (config->hw_fcs_strip) { 1817 /* 1818 * RQs used for LRO-enabled TIRs should not be 1819 * configured to scatter the FCS. 1820 */ 1821 if (lro_on_queue) 1822 DRV_LOG(WARNING, 1823 "port %u CRC stripping has been " 1824 "disabled but will still be performed " 1825 "by hardware, because LRO is enabled", 1826 dev->data->port_id); 1827 else 1828 tmpl->rxq.crc_present = 1; 1829 } else { 1830 DRV_LOG(WARNING, 1831 "port %u CRC stripping has been disabled but will" 1832 " still be performed by hardware, make sure MLNX_OFED" 1833 " and firmware are up to date", 1834 dev->data->port_id); 1835 } 1836 } 1837 DRV_LOG(DEBUG, 1838 "port %u CRC stripping is %s, %u bytes will be subtracted from" 1839 " incoming frames to hide it", 1840 dev->data->port_id, 1841 tmpl->rxq.crc_present ? "disabled" : "enabled", 1842 tmpl->rxq.crc_present << 2); 1843 /* Save port ID. */ 1844 tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && 1845 (!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS)); 1846 tmpl->rxq.port_id = dev->data->port_id; 1847 tmpl->sh = priv->sh; 1848 tmpl->rxq.mp = rx_seg[0].mp; 1849 tmpl->rxq.elts_n = log2above(desc); 1850 tmpl->rxq.rq_repl_thresh = 1851 MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n); 1852 tmpl->rxq.elts = 1853 (struct rte_mbuf *(*)[desc_n])(tmpl + 1); 1854 tmpl->rxq.mprq_bufs = 1855 (struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n); 1856 tmpl->rxq.idx = idx; 1857 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); 1858 return tmpl; 1859 error: 1860 mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh); 1861 mlx5_free(tmpl); 1862 return NULL; 1863 } 1864 1865 /** 1866 * Create a DPDK Rx hairpin queue. 1867 * 1868 * @param dev 1869 * Pointer to Ethernet device. 1870 * @param rxq 1871 * RX queue. 1872 * @param desc 1873 * Number of descriptors to configure in queue. 1874 * @param hairpin_conf 1875 * The hairpin binding configuration. 1876 * 1877 * @return 1878 * A DPDK queue object on success, NULL otherwise and rte_errno is set. 1879 */ 1880 struct mlx5_rxq_ctrl * 1881 mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq, 1882 uint16_t desc, 1883 const struct rte_eth_hairpin_conf *hairpin_conf) 1884 { 1885 uint16_t idx = rxq->idx; 1886 struct mlx5_priv *priv = dev->data->dev_private; 1887 struct mlx5_rxq_ctrl *tmpl; 1888 1889 tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0, 1890 SOCKET_ID_ANY); 1891 if (!tmpl) { 1892 rte_errno = ENOMEM; 1893 return NULL; 1894 } 1895 LIST_INIT(&tmpl->owners); 1896 rxq->ctrl = tmpl; 1897 LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry); 1898 tmpl->type = MLX5_RXQ_TYPE_HAIRPIN; 1899 tmpl->socket = SOCKET_ID_ANY; 1900 tmpl->rxq.rss_hash = 0; 1901 tmpl->rxq.port_id = dev->data->port_id; 1902 tmpl->sh = priv->sh; 1903 tmpl->rxq.mp = NULL; 1904 tmpl->rxq.elts_n = log2above(desc); 1905 tmpl->rxq.elts = NULL; 1906 tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 }; 1907 tmpl->rxq.idx = idx; 1908 rxq->hairpin_conf = *hairpin_conf; 1909 mlx5_rxq_ref(dev, idx); 1910 LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); 1911 return tmpl; 1912 } 1913 1914 /** 1915 * Increase Rx queue reference count. 1916 * 1917 * @param dev 1918 * Pointer to Ethernet device. 1919 * @param idx 1920 * RX queue index. 1921 * 1922 * @return 1923 * A pointer to the queue if it exists, NULL otherwise. 
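 *
 * Typical pairing with mlx5_rxq_deref(); a minimal usage sketch only, not a
 * fixed contract of this driver (error handling omitted):
 * @code
 *	struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, idx);
 *
 *	if (rxq != NULL) {
 *		... use the queue while the reference is held ...
 *		mlx5_rxq_deref(dev, idx);
 *	}
 * @endcode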
1924 */ 1925 struct mlx5_rxq_priv * 1926 mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx) 1927 { 1928 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); 1929 1930 if (rxq != NULL) 1931 __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED); 1932 return rxq; 1933 } 1934 1935 /** 1936 * Dereference a Rx queue. 1937 * 1938 * @param dev 1939 * Pointer to Ethernet device. 1940 * @param idx 1941 * RX queue index. 1942 * 1943 * @return 1944 * Updated reference count. 1945 */ 1946 uint32_t 1947 mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx) 1948 { 1949 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); 1950 1951 if (rxq == NULL) 1952 return 0; 1953 return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED); 1954 } 1955 1956 /** 1957 * Get a Rx queue. 1958 * 1959 * @param dev 1960 * Pointer to Ethernet device. 1961 * @param idx 1962 * RX queue index. 1963 * 1964 * @return 1965 * A pointer to the queue if it exists, NULL otherwise. 1966 */ 1967 struct mlx5_rxq_priv * 1968 mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) 1969 { 1970 struct mlx5_priv *priv = dev->data->dev_private; 1971 1972 MLX5_ASSERT(priv->rxq_privs != NULL); 1973 return (*priv->rxq_privs)[idx]; 1974 } 1975 1976 /** 1977 * Get Rx queue shareable control. 1978 * 1979 * @param dev 1980 * Pointer to Ethernet device. 1981 * @param idx 1982 * RX queue index. 1983 * 1984 * @return 1985 * A pointer to the queue control if it exists, NULL otherwise. 1986 */ 1987 struct mlx5_rxq_ctrl * 1988 mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx) 1989 { 1990 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); 1991 1992 return rxq == NULL ? NULL : rxq->ctrl; 1993 } 1994 1995 /** 1996 * Get Rx queue shareable data. 1997 * 1998 * @param dev 1999 * Pointer to Ethernet device. 2000 * @param idx 2001 * RX queue index. 2002 * 2003 * @return 2004 * A pointer to the queue data if it exists, NULL otherwise. 2005 */ 2006 struct mlx5_rxq_data * 2007 mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx) 2008 { 2009 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx); 2010 2011 return rxq == NULL ? NULL : &rxq->ctrl->rxq; 2012 } 2013 2014 /** 2015 * Release a Rx queue. 2016 * 2017 * @param dev 2018 * Pointer to Ethernet device. 2019 * @param idx 2020 * RX queue index. 2021 * 2022 * @return 2023 * 1 while a reference on it exists, 0 when freed. 2024 */ 2025 int 2026 mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) 2027 { 2028 struct mlx5_priv *priv = dev->data->dev_private; 2029 struct mlx5_rxq_priv *rxq; 2030 struct mlx5_rxq_ctrl *rxq_ctrl; 2031 uint32_t refcnt; 2032 2033 if (priv->rxq_privs == NULL) 2034 return 0; 2035 rxq = mlx5_rxq_get(dev, idx); 2036 if (rxq == NULL || rxq->refcnt == 0) 2037 return 0; 2038 rxq_ctrl = rxq->ctrl; 2039 refcnt = mlx5_rxq_deref(dev, idx); 2040 if (refcnt > 1) { 2041 return 1; 2042 } else if (refcnt == 1) { /* RxQ stopped. */ 2043 priv->obj_ops.rxq_obj_release(rxq); 2044 if (!rxq_ctrl->started && rxq_ctrl->obj != NULL) { 2045 LIST_REMOVE(rxq_ctrl->obj, next); 2046 mlx5_free(rxq_ctrl->obj); 2047 rxq_ctrl->obj = NULL; 2048 } 2049 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) { 2050 if (!rxq_ctrl->started) 2051 rxq_free_elts(rxq_ctrl); 2052 dev->data->rx_queue_state[idx] = 2053 RTE_ETH_QUEUE_STATE_STOPPED; 2054 } 2055 } else { /* Refcnt zero, closing device. 
*/
2056 LIST_REMOVE(rxq, owner_entry);
2057 if (LIST_EMPTY(&rxq_ctrl->owners)) {
2058 if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
2059 mlx5_mr_btree_free
2060 (&rxq_ctrl->rxq.mr_ctrl.cache_bh);
2061 if (rxq_ctrl->rxq.shared)
2062 LIST_REMOVE(rxq_ctrl, share_entry);
2063 LIST_REMOVE(rxq_ctrl, next);
2064 mlx5_free(rxq_ctrl);
2065 }
2066 dev->data->rx_queues[idx] = NULL;
2067 mlx5_free(rxq);
2068 (*priv->rxq_privs)[idx] = NULL;
2069 }
2070 return 0;
2071 }
2072
2073 /**
2074 * Verify the Rx Queue list is empty.
2075 *
2076 * @param dev
2077 * Pointer to Ethernet device.
2078 *
2079 * @return
2080 * The number of objects not released.
2081 */
2082 int
2083 mlx5_rxq_verify(struct rte_eth_dev *dev)
2084 {
2085 struct mlx5_priv *priv = dev->data->dev_private;
2086 struct mlx5_rxq_ctrl *rxq_ctrl;
2087 int ret = 0;
2088
2089 LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
2090 DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
2091 dev->data->port_id, rxq_ctrl->rxq.idx);
2092 ++ret;
2093 }
2094 return ret;
2095 }
2096
2097 /**
2098 * Get a Rx queue type.
2099 *
2100 * @param dev
2101 * Pointer to Ethernet device.
2102 * @param idx
2103 * Rx queue index.
2104 *
2105 * @return
2106 * The Rx queue type.
2107 */
2108 enum mlx5_rxq_type
2109 mlx5_rxq_get_type(struct rte_eth_dev *dev, uint16_t idx)
2110 {
2111 struct mlx5_priv *priv = dev->data->dev_private;
2112 struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_ctrl_get(dev, idx);
2113
2114 if (idx < priv->rxqs_n && rxq_ctrl != NULL)
2115 return rxq_ctrl->type;
2116 return MLX5_RXQ_TYPE_UNDEFINED;
2117 }
2118
2119 /**
2120 * Get a Rx hairpin queue configuration.
2121 *
2122 * @param dev
2123 * Pointer to Ethernet device.
2124 * @param idx
2125 * Rx queue index.
2126 *
2127 * @return
2128 * Pointer to the configuration if a hairpin RX queue, otherwise NULL.
2129 */
2130 const struct rte_eth_hairpin_conf *
2131 mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
2132 {
2133 struct mlx5_priv *priv = dev->data->dev_private;
2134 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
2135
2136 if (idx < priv->rxqs_n && rxq != NULL) {
2137 if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
2138 return &rxq->hairpin_conf;
2139 }
2140 return NULL;
2141 }
2142
2143 /**
2144 * Match queues listed in arguments to queues contained in indirection table
2145 * object.
2146 *
2147 * @param ind_tbl
2148 * Pointer to indirection table to match.
2149 * @param queues
2150 * Queues to match to queues in the indirection table.
2151 * @param queues_n
2152 * Number of queues in the array.
2153 *
2154 * @return
2155 * 1 if all queues in the indirection table match, 0 otherwise.
2156 */
2157 static int
2158 mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
2159 const uint16_t *queues, uint32_t queues_n)
2160 {
2161 return (ind_tbl->queues_n == queues_n) &&
2162 (!memcmp(ind_tbl->queues, queues,
2163 ind_tbl->queues_n * sizeof(ind_tbl->queues[0])));
2164 }
2165
2166 /**
2167 * Get an indirection table.
2168 *
2169 * @param dev
2170 * Pointer to Ethernet device.
2171 * @param queues
2172 * Queues entering in the indirection table.
2173 * @param queues_n
2174 * Number of queues in the array.
2175 *
2176 * @return
2177 * An indirection table if found.
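 *
 * The reference count of a matching table is incremented, so callers are
 * expected to drop it later with mlx5_ind_table_obj_release(). A minimal
 * sketch; the release flags below (not standalone, dereference Rx queues)
 * are an assumption that depends on how the table was created:
 * @code
 *	struct mlx5_ind_table_obj *ind_tbl =
 *		mlx5_ind_table_obj_get(dev, queues, queues_n);
 *
 *	if (ind_tbl != NULL) {
 *		... use the indirection table ...
 *		mlx5_ind_table_obj_release(dev, ind_tbl, false, true);
 *	}
 * @endcode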
2178 */
2179 struct mlx5_ind_table_obj *
2180 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
2181 uint32_t queues_n)
2182 {
2183 struct mlx5_priv *priv = dev->data->dev_private;
2184 struct mlx5_ind_table_obj *ind_tbl;
2185
2186 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2187 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2188 if ((ind_tbl->queues_n == queues_n) &&
2189 (memcmp(ind_tbl->queues, queues,
2190 ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
2191 == 0)) {
2192 __atomic_fetch_add(&ind_tbl->refcnt, 1,
2193 __ATOMIC_RELAXED);
2194 break;
2195 }
2196 }
2197 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2198 return ind_tbl;
2199 }
2200
2201 /**
2202 * Release an indirection table.
2203 *
2204 * @param dev
2205 * Pointer to Ethernet device.
2206 * @param ind_tbl
2207 * Indirection table to release.
2208 * @param standalone
2209 * Indirection table for Standalone queue.
2210 * @param deref_rxqs
2211 * If true, then dereference RX queues related to indirection table.
2212 * Otherwise, no additional action will be taken.
2213 *
2214 * @return
2215 * 1 while a reference on it exists, 0 when freed.
2216 */
2217 int
2218 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
2219 struct mlx5_ind_table_obj *ind_tbl,
2220 bool standalone,
2221 bool deref_rxqs)
2222 {
2223 struct mlx5_priv *priv = dev->data->dev_private;
2224 unsigned int i, ret;
2225
2226 rte_rwlock_write_lock(&priv->ind_tbls_lock);
2227 ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
2228 if (!ret && !standalone)
2229 LIST_REMOVE(ind_tbl, next);
2230 rte_rwlock_write_unlock(&priv->ind_tbls_lock);
2231 if (ret)
2232 return 1;
2233 priv->obj_ops.ind_table_destroy(ind_tbl);
2234 if (deref_rxqs) {
2235 for (i = 0; i != ind_tbl->queues_n; ++i)
2236 claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
2237 }
2238 mlx5_free(ind_tbl);
2239 return 0;
2240 }
2241
2242 /**
2243 * Verify the indirection table list is empty.
2244 *
2245 * @param dev
2246 * Pointer to Ethernet device.
2247 *
2248 * @return
2249 * The number of objects not released.
2250 */
2251 int
2252 mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
2253 {
2254 struct mlx5_priv *priv = dev->data->dev_private;
2255 struct mlx5_ind_table_obj *ind_tbl;
2256 int ret = 0;
2257
2258 rte_rwlock_read_lock(&priv->ind_tbls_lock);
2259 LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
2260 DRV_LOG(DEBUG,
2261 "port %u indirection table obj %p still referenced",
2262 dev->data->port_id, (void *)ind_tbl);
2263 ++ret;
2264 }
2265 rte_rwlock_read_unlock(&priv->ind_tbls_lock);
2266 return ret;
2267 }
2268
2269 /**
2270 * Set up the fields of an indirection table structure.
2271 *
2272 * @param dev
2273 * Pointer to Ethernet device.
2274 * @param ind_tbl
2275 * Indirection table to modify.
2276 * @param ref_qs
2277 * Whether to increment RxQ reference counters.
2278 *
2279 * @return
2280 * 0 on success, a negative errno value otherwise and rte_errno is set.
2281 */
2282 int
2283 mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
2284 struct mlx5_ind_table_obj *ind_tbl,
2285 bool ref_qs)
2286 {
2287 struct mlx5_priv *priv = dev->data->dev_private;
2288 uint32_t queues_n = ind_tbl->queues_n;
2289 uint16_t *queues = ind_tbl->queues;
2290 unsigned int i = 0, j;
2291 int ret = 0, err;
2292 const unsigned int n = rte_is_power_of_2(queues_n) ?
2293 log2above(queues_n) : 2294 log2above(priv->config.ind_table_max_size); 2295 2296 if (ref_qs) 2297 for (i = 0; i != queues_n; ++i) { 2298 if (mlx5_rxq_ref(dev, queues[i]) == NULL) { 2299 ret = -rte_errno; 2300 goto error; 2301 } 2302 } 2303 ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl); 2304 if (ret) 2305 goto error; 2306 __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED); 2307 return 0; 2308 error: 2309 if (ref_qs) { 2310 err = rte_errno; 2311 for (j = 0; j < i; j++) 2312 mlx5_rxq_deref(dev, queues[j]); 2313 rte_errno = err; 2314 } 2315 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", 2316 dev->data->port_id); 2317 return ret; 2318 } 2319 2320 /** 2321 * Create an indirection table. 2322 * 2323 * @param dev 2324 * Pointer to Ethernet device. 2325 * @param queues 2326 * Queues entering in the indirection table. 2327 * @param queues_n 2328 * Number of queues in the array. 2329 * @param standalone 2330 * Indirection table for Standalone queue. 2331 * @param ref_qs 2332 * Whether to increment RxQ reference counters. 2333 * 2334 * @return 2335 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set. 2336 */ 2337 static struct mlx5_ind_table_obj * 2338 mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues, 2339 uint32_t queues_n, bool standalone, bool ref_qs) 2340 { 2341 struct mlx5_priv *priv = dev->data->dev_private; 2342 struct mlx5_ind_table_obj *ind_tbl; 2343 int ret; 2344 2345 ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) + 2346 queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY); 2347 if (!ind_tbl) { 2348 rte_errno = ENOMEM; 2349 return NULL; 2350 } 2351 ind_tbl->queues_n = queues_n; 2352 ind_tbl->queues = (uint16_t *)(ind_tbl + 1); 2353 memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues)); 2354 ret = mlx5_ind_table_obj_setup(dev, ind_tbl, ref_qs); 2355 if (ret < 0) { 2356 mlx5_free(ind_tbl); 2357 return NULL; 2358 } 2359 if (!standalone) { 2360 rte_rwlock_write_lock(&priv->ind_tbls_lock); 2361 LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); 2362 rte_rwlock_write_unlock(&priv->ind_tbls_lock); 2363 } 2364 return ind_tbl; 2365 } 2366 2367 static int 2368 mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused, 2369 struct mlx5_ind_table_obj *ind_tbl) 2370 { 2371 uint32_t refcnt; 2372 2373 refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED); 2374 if (refcnt <= 1) 2375 return 0; 2376 /* 2377 * Modification of indirection tables having more than 1 2378 * reference is unsupported. 2379 */ 2380 DRV_LOG(DEBUG, 2381 "Port %u cannot modify indirection table %p (refcnt %u > 1).", 2382 dev->data->port_id, (void *)ind_tbl, refcnt); 2383 rte_errno = EINVAL; 2384 return -rte_errno; 2385 } 2386 2387 /** 2388 * Modify an indirection table. 2389 * 2390 * @param dev 2391 * Pointer to Ethernet device. 2392 * @param ind_table 2393 * Indirection table to modify. 2394 * @param queues 2395 * Queues replacement for the indirection table. 2396 * @param queues_n 2397 * Number of queues in the array. 2398 * @param standalone 2399 * Indirection table for Standalone queue. 2400 * @param ref_new_qs 2401 * Whether to increment new RxQ set reference counters. 2402 * @param deref_old_qs 2403 * Whether to decrement old RxQ set reference counters. 2404 * 2405 * @return 2406 * 0 on success, a negative errno value otherwise and rte_errno is set. 
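 *
 * Only tables holding a single reference (e.g. owned by a shared RSS action)
 * can be modified. A minimal sketch, assuming a hypothetical new_queues
 * array that stays valid after the call, since the table keeps the pointer
 * rather than copying it:
 * @code
 *	ret = mlx5_ind_table_obj_modify(dev, ind_tbl, new_queues, new_queues_n,
 *					true, true, true);
 * @endcode
 * where the three flags request standalone handling, referencing the new
 * queue set and dereferencing the old one.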
2407 */ 2408 int 2409 mlx5_ind_table_obj_modify(struct rte_eth_dev *dev, 2410 struct mlx5_ind_table_obj *ind_tbl, 2411 uint16_t *queues, const uint32_t queues_n, 2412 bool standalone, bool ref_new_qs, bool deref_old_qs) 2413 { 2414 struct mlx5_priv *priv = dev->data->dev_private; 2415 unsigned int i = 0, j; 2416 int ret = 0, err; 2417 const unsigned int n = rte_is_power_of_2(queues_n) ? 2418 log2above(queues_n) : 2419 log2above(priv->config.ind_table_max_size); 2420 2421 MLX5_ASSERT(standalone); 2422 RTE_SET_USED(standalone); 2423 if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0) 2424 return -rte_errno; 2425 if (ref_new_qs) 2426 for (i = 0; i != queues_n; ++i) { 2427 if (!mlx5_rxq_ref(dev, queues[i])) { 2428 ret = -rte_errno; 2429 goto error; 2430 } 2431 } 2432 MLX5_ASSERT(priv->obj_ops.ind_table_modify); 2433 ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl); 2434 if (ret) 2435 goto error; 2436 if (deref_old_qs) 2437 for (i = 0; i < ind_tbl->queues_n; i++) 2438 claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i])); 2439 ind_tbl->queues_n = queues_n; 2440 ind_tbl->queues = queues; 2441 return 0; 2442 error: 2443 if (ref_new_qs) { 2444 err = rte_errno; 2445 for (j = 0; j < i; j++) 2446 mlx5_rxq_deref(dev, queues[j]); 2447 rte_errno = err; 2448 } 2449 DRV_LOG(DEBUG, "Port %u cannot setup indirection table.", 2450 dev->data->port_id); 2451 return ret; 2452 } 2453 2454 /** 2455 * Attach an indirection table to its queues. 2456 * 2457 * @param dev 2458 * Pointer to Ethernet device. 2459 * @param ind_table 2460 * Indirection table to attach. 2461 * 2462 * @return 2463 * 0 on success, a negative errno value otherwise and rte_errno is set. 2464 */ 2465 int 2466 mlx5_ind_table_obj_attach(struct rte_eth_dev *dev, 2467 struct mlx5_ind_table_obj *ind_tbl) 2468 { 2469 int ret; 2470 2471 ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues, 2472 ind_tbl->queues_n, 2473 true /* standalone */, 2474 true /* ref_new_qs */, 2475 false /* deref_old_qs */); 2476 if (ret != 0) 2477 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p", 2478 dev->data->port_id, (void *)ind_tbl); 2479 return ret; 2480 } 2481 2482 /** 2483 * Detach an indirection table from its queues. 2484 * 2485 * @param dev 2486 * Pointer to Ethernet device. 2487 * @param ind_table 2488 * Indirection table to detach. 2489 * 2490 * @return 2491 * 0 on success, a negative errno value otherwise and rte_errno is set. 2492 */ 2493 int 2494 mlx5_ind_table_obj_detach(struct rte_eth_dev *dev, 2495 struct mlx5_ind_table_obj *ind_tbl) 2496 { 2497 struct mlx5_priv *priv = dev->data->dev_private; 2498 const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ? 
2499 log2above(ind_tbl->queues_n) :
2500 log2above(priv->config.ind_table_max_size);
2501 unsigned int i;
2502 int ret;
2503
2504 ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
2505 if (ret != 0)
2506 return ret;
2507 MLX5_ASSERT(priv->obj_ops.ind_table_modify);
2508 ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
2509 if (ret != 0) {
2510 DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
2511 dev->data->port_id, (void *)ind_tbl);
2512 return ret;
2513 }
2514 for (i = 0; i < ind_tbl->queues_n; i++)
2515 mlx5_rxq_release(dev, ind_tbl->queues[i]);
2516 return ret;
2517 }
2518
2519 int
2520 mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
2521 void *cb_ctx)
2522 {
2523 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2524 struct mlx5_flow_rss_desc *rss_desc = ctx->data;
2525 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
2526
2527 return (hrxq->rss_key_len != rss_desc->key_len ||
2528 memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
2529 hrxq->hash_fields != rss_desc->hash_fields ||
2530 hrxq->ind_table->queues_n != rss_desc->queue_num ||
2531 memcmp(hrxq->ind_table->queues, rss_desc->queue,
2532 rss_desc->queue_num * sizeof(rss_desc->queue[0])));
2533 }
2534
2535 /**
2536 * Modify an Rx Hash queue configuration.
2537 *
2538 * @param dev
2539 * Pointer to Ethernet device.
2540 * @param hrxq_idx
2541 * Index to Hash Rx queue to modify.
2542 * @param rss_key
2543 * RSS key for the Rx hash queue.
2544 * @param rss_key_len
2545 * RSS key length.
2546 * @param hash_fields
2547 * Verbs protocol hash field to make the RSS on.
2548 * @param queues
2549 * Queues entering in hash queue. In case of empty hash_fields only the
2550 * first queue index will be taken for the indirection table.
2551 * @param queues_n
2552 * Number of queues.
2553 *
2554 * @return
2555 * 0 on success, a negative errno value otherwise and rte_errno is set.
2556 */
2557 int
2558 mlx5_hrxq_modify(struct rte_eth_dev *dev, uint32_t hrxq_idx,
2559 const uint8_t *rss_key, uint32_t rss_key_len,
2560 uint64_t hash_fields,
2561 const uint16_t *queues, uint32_t queues_n)
2562 {
2563 int err;
2564 struct mlx5_ind_table_obj *ind_tbl = NULL;
2565 struct mlx5_priv *priv = dev->data->dev_private;
2566 struct mlx5_hrxq *hrxq =
2567 mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2568 bool dev_started = !!dev->data->dev_started;
2569 int ret;
2570
2571 if (!hrxq) {
2572 rte_errno = EINVAL;
2573 return -rte_errno;
2574 }
2575 /* Validations. */
2576 if (hrxq->rss_key_len != rss_key_len) {
2577 /* rss_key_len is fixed at 40 bytes and not supposed to change. */
2578 rte_errno = EINVAL;
2579 return -rte_errno;
2580 }
2581 queues_n = hash_fields ? queues_n : 1;
2582 if (mlx5_ind_table_obj_match_queues(hrxq->ind_table,
2583 queues, queues_n)) {
2584 ind_tbl = hrxq->ind_table;
2585 } else {
2586 if (hrxq->standalone) {
2587 /*
2588 * Replacement of indirection table unsupported for
2589 * standalone hrxq objects (used by shared RSS).
2590 */ 2591 rte_errno = ENOTSUP; 2592 return -rte_errno; 2593 } 2594 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); 2595 if (!ind_tbl) 2596 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, 2597 hrxq->standalone, 2598 dev_started); 2599 } 2600 if (!ind_tbl) { 2601 rte_errno = ENOMEM; 2602 return -rte_errno; 2603 } 2604 MLX5_ASSERT(priv->obj_ops.hrxq_modify); 2605 ret = priv->obj_ops.hrxq_modify(dev, hrxq, rss_key, 2606 hash_fields, ind_tbl); 2607 if (ret) { 2608 rte_errno = errno; 2609 goto error; 2610 } 2611 if (ind_tbl != hrxq->ind_table) { 2612 MLX5_ASSERT(!hrxq->standalone); 2613 mlx5_ind_table_obj_release(dev, hrxq->ind_table, 2614 hrxq->standalone, true); 2615 hrxq->ind_table = ind_tbl; 2616 } 2617 hrxq->hash_fields = hash_fields; 2618 memcpy(hrxq->rss_key, rss_key, rss_key_len); 2619 return 0; 2620 error: 2621 err = rte_errno; 2622 if (ind_tbl != hrxq->ind_table) { 2623 MLX5_ASSERT(!hrxq->standalone); 2624 mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone, 2625 true); 2626 } 2627 rte_errno = err; 2628 return -rte_errno; 2629 } 2630 2631 static void 2632 __mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq) 2633 { 2634 struct mlx5_priv *priv = dev->data->dev_private; 2635 2636 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 2637 mlx5_glue->destroy_flow_action(hrxq->action); 2638 #endif 2639 priv->obj_ops.hrxq_destroy(hrxq); 2640 if (!hrxq->standalone) { 2641 mlx5_ind_table_obj_release(dev, hrxq->ind_table, 2642 hrxq->standalone, true); 2643 } 2644 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); 2645 } 2646 2647 /** 2648 * Release the hash Rx queue. 2649 * 2650 * @param dev 2651 * Pointer to Ethernet device. 2652 * @param hrxq 2653 * Index to Hash Rx queue to release. 2654 * 2655 * @param list 2656 * mlx5 list pointer. 2657 * @param entry 2658 * Hash queue entry pointer. 2659 */ 2660 void 2661 mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry) 2662 { 2663 struct rte_eth_dev *dev = tool_ctx; 2664 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); 2665 2666 __mlx5_hrxq_remove(dev, hrxq); 2667 } 2668 2669 static struct mlx5_hrxq * 2670 __mlx5_hrxq_create(struct rte_eth_dev *dev, 2671 struct mlx5_flow_rss_desc *rss_desc) 2672 { 2673 struct mlx5_priv *priv = dev->data->dev_private; 2674 const uint8_t *rss_key = rss_desc->key; 2675 uint32_t rss_key_len = rss_desc->key_len; 2676 bool standalone = !!rss_desc->shared_rss; 2677 const uint16_t *queues = 2678 standalone ? rss_desc->const_q : rss_desc->queue; 2679 uint32_t queues_n = rss_desc->queue_num; 2680 struct mlx5_hrxq *hrxq = NULL; 2681 uint32_t hrxq_idx = 0; 2682 struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl; 2683 int ret; 2684 2685 queues_n = rss_desc->hash_fields ? 
queues_n : 1; 2686 if (!ind_tbl) 2687 ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n); 2688 if (!ind_tbl) 2689 ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n, 2690 standalone, 2691 !!dev->data->dev_started); 2692 if (!ind_tbl) 2693 return NULL; 2694 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); 2695 if (!hrxq) 2696 goto error; 2697 hrxq->standalone = standalone; 2698 hrxq->idx = hrxq_idx; 2699 hrxq->ind_table = ind_tbl; 2700 hrxq->rss_key_len = rss_key_len; 2701 hrxq->hash_fields = rss_desc->hash_fields; 2702 memcpy(hrxq->rss_key, rss_key, rss_key_len); 2703 ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel); 2704 if (ret < 0) 2705 goto error; 2706 return hrxq; 2707 error: 2708 if (!rss_desc->ind_tbl) 2709 mlx5_ind_table_obj_release(dev, ind_tbl, standalone, true); 2710 if (hrxq) 2711 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx); 2712 return NULL; 2713 } 2714 2715 struct mlx5_list_entry * 2716 mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx) 2717 { 2718 struct rte_eth_dev *dev = tool_ctx; 2719 struct mlx5_flow_cb_ctx *ctx = cb_ctx; 2720 struct mlx5_flow_rss_desc *rss_desc = ctx->data; 2721 struct mlx5_hrxq *hrxq; 2722 2723 hrxq = __mlx5_hrxq_create(dev, rss_desc); 2724 return hrxq ? &hrxq->entry : NULL; 2725 } 2726 2727 struct mlx5_list_entry * 2728 mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry, 2729 void *cb_ctx __rte_unused) 2730 { 2731 struct rte_eth_dev *dev = tool_ctx; 2732 struct mlx5_priv *priv = dev->data->dev_private; 2733 struct mlx5_hrxq *hrxq; 2734 uint32_t hrxq_idx = 0; 2735 2736 hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx); 2737 if (!hrxq) 2738 return NULL; 2739 memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN); 2740 hrxq->idx = hrxq_idx; 2741 return &hrxq->entry; 2742 } 2743 2744 void 2745 mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry) 2746 { 2747 struct rte_eth_dev *dev = tool_ctx; 2748 struct mlx5_priv *priv = dev->data->dev_private; 2749 struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry); 2750 2751 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx); 2752 } 2753 2754 /** 2755 * Get an Rx Hash queue. 2756 * 2757 * @param dev 2758 * Pointer to Ethernet device. 2759 * @param rss_desc 2760 * RSS configuration for the Rx hash queue. 2761 * 2762 * @return 2763 * An hash Rx queue index on success. 2764 */ 2765 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev, 2766 struct mlx5_flow_rss_desc *rss_desc) 2767 { 2768 struct mlx5_priv *priv = dev->data->dev_private; 2769 struct mlx5_hrxq *hrxq; 2770 struct mlx5_list_entry *entry; 2771 struct mlx5_flow_cb_ctx ctx = { 2772 .data = rss_desc, 2773 }; 2774 2775 if (rss_desc->shared_rss) { 2776 hrxq = __mlx5_hrxq_create(dev, rss_desc); 2777 } else { 2778 entry = mlx5_list_register(priv->hrxqs, &ctx); 2779 if (!entry) 2780 return 0; 2781 hrxq = container_of(entry, typeof(*hrxq), entry); 2782 } 2783 if (hrxq) 2784 return hrxq->idx; 2785 return 0; 2786 } 2787 2788 /** 2789 * Release the hash Rx queue. 2790 * 2791 * @param dev 2792 * Pointer to Ethernet device. 2793 * @param hrxq_idx 2794 * Index to Hash Rx queue to release. 2795 * 2796 * @return 2797 * 1 while a reference on it exists, 0 when freed. 
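 *
 * Pairs with mlx5_hrxq_get(); a minimal sketch (rss_desc setup omitted,
 * non-shared RSS case):
 * @code
 *	uint32_t hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 *
 *	if (hrxq_idx != 0) {
 *		... install flows using the hash Rx queue ...
 *		mlx5_hrxq_release(dev, hrxq_idx);
 *	}
 * @endcode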
2798 */
2799 int mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
2800 {
2801 struct mlx5_priv *priv = dev->data->dev_private;
2802 struct mlx5_hrxq *hrxq;
2803
2804 hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
2805 if (!hrxq)
2806 return 0;
2807 if (!hrxq->standalone)
2808 return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
2809 __mlx5_hrxq_remove(dev, hrxq);
2810 return 0;
2811 }
2812
2813 /**
2814 * Create a drop Rx Hash queue.
2815 *
2816 * @param dev
2817 * Pointer to Ethernet device.
2818 *
2819 * @return
2820 * The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
2821 */
2822 struct mlx5_hrxq *
2823 mlx5_drop_action_create(struct rte_eth_dev *dev)
2824 {
2825 struct mlx5_priv *priv = dev->data->dev_private;
2826 struct mlx5_hrxq *hrxq = NULL;
2827 int ret;
2828
2829 if (priv->drop_queue.hrxq)
2830 return priv->drop_queue.hrxq;
2831 hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
2832 if (!hrxq) {
2833 DRV_LOG(WARNING,
2834 "Port %u cannot allocate memory for drop queue.",
2835 dev->data->port_id);
2836 rte_errno = ENOMEM;
2837 goto error;
2838 }
2839 priv->drop_queue.hrxq = hrxq;
2840 hrxq->ind_table = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq->ind_table),
2841 0, SOCKET_ID_ANY);
2842 if (!hrxq->ind_table) {
2843 rte_errno = ENOMEM;
2844 goto error;
2845 }
2846 ret = priv->obj_ops.drop_action_create(dev);
2847 if (ret < 0)
2848 goto error;
2849 return hrxq;
2850 error:
2851 if (hrxq) {
2852 if (hrxq->ind_table)
2853 mlx5_free(hrxq->ind_table);
2854 priv->drop_queue.hrxq = NULL;
2855 mlx5_free(hrxq);
2856 }
2857 return NULL;
2858 }
2859
2860 /**
2861 * Release a drop hash Rx queue.
2862 *
2863 * @param dev
2864 * Pointer to Ethernet device.
2865 */
2866 void
2867 mlx5_drop_action_destroy(struct rte_eth_dev *dev)
2868 {
2869 struct mlx5_priv *priv = dev->data->dev_private;
2870 struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
2871
2872 if (!priv->drop_queue.hrxq)
2873 return;
2874 priv->obj_ops.drop_action_destroy(dev);
2875 mlx5_free(priv->drop_queue.rxq);
2876 mlx5_free(hrxq->ind_table);
2877 mlx5_free(hrxq);
2878 priv->drop_queue.rxq = NULL;
2879 priv->drop_queue.hrxq = NULL;
2880 }
2881
2882 /**
2883 * Verify the hash Rx queue list is empty.
2884 *
2885 * @param dev
2886 * Pointer to Ethernet device.
2887 *
2888 * @return
2889 * The number of objects not released.
2890 */
2891 uint32_t
2892 mlx5_hrxq_verify(struct rte_eth_dev *dev)
2893 {
2894 struct mlx5_priv *priv = dev->data->dev_private;
2895
2896 return mlx5_list_get_entry_num(priv->hrxqs);
2897 }
2898
2899 /**
2900 * Set the Rx queue timestamp conversion parameters.
2901 *
2902 * @param[in] dev
2903 * Pointer to the Ethernet device structure.
2904 */
2905 void
2906 mlx5_rxq_timestamp_set(struct rte_eth_dev *dev)
2907 {
2908 struct mlx5_priv *priv = dev->data->dev_private;
2909 struct mlx5_dev_ctx_shared *sh = priv->sh;
2910 unsigned int i;
2911
2912 for (i = 0; i != priv->rxqs_n; ++i) {
2913 struct mlx5_rxq_data *data = mlx5_rxq_data_get(dev, i);
2914
2915 if (data == NULL)
2916 continue;
2917 data->sh = sh;
2918 data->rt_timestamp = priv->config.rt_timestamp;
2919 }
2920 }
2921
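
/*
 * Illustrative sketch only: the function below is hypothetical and not part
 * of the mlx5 control path. It shows how the Rx queue reference-counting
 * helpers defined above are expected to be paired.
 */
static __rte_unused void
mlx5_rxq_refcnt_usage_sketch(struct rte_eth_dev *dev, uint16_t idx)
{
	/* Take a reference to keep the queue alive while it is in use. */
	struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, idx);

	if (rxq == NULL)
		return;
	/* ... rxq->ctrl and mlx5_rxq_data_get(dev, idx) may be used here ... */
	/* Drop the reference; mlx5_rxq_release() frees the queue when unused. */
	mlx5_rxq_deref(dev, idx);
}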