/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_os.h>
#include <mlx5_common_mp.h>
#include <mlx5_common_pci.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"

/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

/* Device parameter to enable padding Rx packet to cacheline size. */
#define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

/* Device parameter to enable Multi-Packet Rx queue. */
#define MLX5_RX_MPRQ_EN "mprq_en"

/* Device parameter to configure log 2 of the number of strides for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"

/* Device parameter to configure log 2 of the stride size for MPRQ. */
#define MLX5_RX_MPRQ_LOG_STRIDE_SIZE "mprq_log_stride_size"

/* Device parameter to limit the size of memcpy'd packet for MPRQ. */
#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"

/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"

/* Device parameter to configure inline send. Deprecated, ignored. */
#define MLX5_TXQ_INLINE "txq_inline"

/* Device parameter to limit packet size to inline with ordinary SEND. */
#define MLX5_TXQ_INLINE_MAX "txq_inline_max"

/* Device parameter to configure minimal data size to inline. */
#define MLX5_TXQ_INLINE_MIN "txq_inline_min"

/* Device parameter to limit packet size to inline with Enhanced MPW. */
#define MLX5_TXQ_INLINE_MPW "txq_inline_mpw"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling inline send.
 */
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"

/*
 * Device parameter to configure the number of TX queues threshold for
 * enabling vectorized Tx, deprecated, ignored (no vectorized Tx routines).
 */
#define MLX5_TXQS_MAX_VEC "txqs_max_vec"

/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"

/*
 * Device parameter to force doorbell register mapping
 * to the non-cached region, eliminating the extra write memory barrier.
 */
#define MLX5_TX_DB_NC "tx_db_nc"

/*
 * Device parameter to include 2 dsegs in the title WQEBB.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"

/*
 * Device parameter to limit the size of inlining packet.
 * Deprecated, ignored.
 */
#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"

/*
 * Device parameter to enable Tx scheduling on timestamps
 * and specify the packet pacing granularity in nanoseconds.
 */
#define MLX5_TX_PP "tx_pp"
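
/*
 * Illustrative note (not part of the original source): a devarg such as
 * "tx_pp=500" would request Tx packet scheduling with a 500 ns pacing
 * granularity, subject to device support; the exact accepted range is
 * device dependent.
 */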

/*
 * Device parameter to specify skew in nanoseconds on Tx datapath,
 * i.e. the time between SQ start WQE processing and the actual packet
 * data appearing on the wire.
 */
#define MLX5_TX_SKEW "tx_skew"

/*
 * Device parameter to enable hardware Tx vector.
 * Deprecated, ignored (no vectorized Tx routines anymore).
 */
#define MLX5_TX_VEC_EN "tx_vec_en"

/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"

/* Allow L3 VXLAN flow creation. */
#define MLX5_L3_VXLAN_EN "l3_vxlan_en"

/* Activate DV E-Switch flow steering. */
#define MLX5_DV_ESW_EN "dv_esw_en"

/* Activate DV flow steering. */
#define MLX5_DV_FLOW_EN "dv_flow_en"

/* Enable extensive flow metadata support. */
#define MLX5_DV_XMETA_EN "dv_xmeta_en"

/* Device parameter to let the user manage the LACP traffic of a bonded device. */
#define MLX5_LACP_BY_USER "lacp_by_user"

/* Activate Netlink support in VF mode. */
#define MLX5_VF_NL_EN "vf_nl_en"

/* Enable extending memsegs when creating a MR. */
#define MLX5_MR_EXT_MEMSEG_EN "mr_ext_memseg_en"

/* Select port representors to instantiate. */
#define MLX5_REPRESENTOR "representor"

/* Device parameter to configure the maximum number of dump files per queue. */
#define MLX5_MAX_DUMP_FILES_NUM "max_dump_files_num"

/* Configure timeout of LRO session (in microseconds). */
#define MLX5_LRO_TIMEOUT_USEC "lro_timeout_usec"

/*
 * Device parameter to configure the total data buffer size for a single
 * hairpin queue (logarithm value).
 */
#define MLX5_HP_BUF_SIZE "hp_buf_log_sz"

/* Flow memory reclaim mode. */
#define MLX5_RECLAIM_MEM "reclaim_mem_mode"

/* The default memory allocator used in PMD. */
#define MLX5_SYS_MEM_EN "sys_mem_en"

/* Decap will be used or not. */
#define MLX5_DECAP_EN "decap_en"
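
/*
 * Illustrative example (not part of the original source): the parameters
 * above are passed as comma-separated device arguments, e.g. with a
 * hypothetical PCI address on the EAL command line:
 *   -a 0000:03:00.0,rxq_cqe_comp_en=1,mprq_en=1,txq_inline_max=204
 * Each key is parsed by mlx5_args() and mlx5_args_check() further below.
 */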

/* Shared memory between primary and secondary processes. */
struct mlx5_shared_data *mlx5_shared_data;

/** Driver-specific log messages type. */
int mlx5_logtype;

static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
						LIST_HEAD_INITIALIZER();
static pthread_mutex_t mlx5_dev_ctx_list_mutex;
static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_IPOOL_DECAP_ENCAP] = {
		.size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_encap_decap_ipool",
	},
	[MLX5_IPOOL_PUSH_VLAN] = {
		.size = sizeof(struct mlx5_flow_dv_push_vlan_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_push_vlan_ipool",
	},
	[MLX5_IPOOL_TAG] = {
		.size = sizeof(struct mlx5_flow_dv_tag_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_tag_ipool",
	},
	[MLX5_IPOOL_PORT_ID] = {
		.size = sizeof(struct mlx5_flow_dv_port_id_action_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_port_id_ipool",
	},
	[MLX5_IPOOL_JUMP] = {
		.size = sizeof(struct mlx5_flow_tbl_data_entry),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_jump_ipool",
	},
	[MLX5_IPOOL_SAMPLE] = {
		.size = sizeof(struct mlx5_flow_dv_sample_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_sample_ipool",
	},
	[MLX5_IPOOL_DEST_ARRAY] = {
		.size = sizeof(struct mlx5_flow_dv_dest_array_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_dest_array_ipool",
	},
	[MLX5_IPOOL_TUNNEL_ID] = {
		.size = sizeof(struct mlx5_flow_tunnel),
		.trunk_size = MLX5_MAX_TUNNELS,
		.need_lock = 1,
		.release_mem_en = 1,
		.type = "mlx5_tunnel_offload",
	},
	[MLX5_IPOOL_TNL_TBL_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_tnl_tbl_ipool",
	},
#endif
	[MLX5_IPOOL_MTR] = {
		.size = sizeof(struct mlx5_flow_meter),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_meter_ipool",
	},
	[MLX5_IPOOL_MCP] = {
		.size = sizeof(struct mlx5_flow_mreg_copy_resource),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_mcp_ipool",
	},
	[MLX5_IPOOL_HRXQ] = {
		.size = (sizeof(struct mlx5_hrxq) + MLX5_RSS_HASH_KEY_LEN),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_hrxq_ipool",
	},
	[MLX5_IPOOL_MLX5_FLOW] = {
		/*
		 * MLX5_IPOOL_MLX5_FLOW size varies for DV and VERBS flows.
		 * It is set at run time according to the PCI function
		 * configuration.
		 */
		.size = 0,
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_flow_handle_ipool",
	},
	[MLX5_IPOOL_RTE_FLOW] = {
		.size = sizeof(struct rte_flow),
		.trunk_size = 4096,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "rte_flow_ipool",
	},
	[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID] = {
		.size = 0,
		.need_lock = 1,
		.type = "mlx5_flow_rss_id_ipool",
	},
	[MLX5_IPOOL_RSS_SHARED_ACTIONS] = {
		.size = sizeof(struct mlx5_shared_action_rss),
		.trunk_size = 64,
		.grow_trunk = 3,
		.grow_shift = 2,
		.need_lock = 1,
		.release_mem_en = 1,
		.malloc = mlx5_malloc,
		.free = mlx5_free,
		.type = "mlx5_shared_action_rss",
	},
};


#define MLX5_FLOW_MIN_ID_POOL_SIZE 512
#define MLX5_ID_GENERATION_ARRAY_FACTOR 16

#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096

/**
 * Initialize the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int err;

	if (sh->aso_age_mng)
		return 0;
	sh->aso_age_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->aso_age_mng),
				      RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh->aso_age_mng) {
		DRV_LOG(ERR, "aso_age_mng allocation failed.");
		rte_errno = ENOMEM;
		return -ENOMEM;
	}
	err = mlx5_aso_queue_init(sh);
	if (err) {
		mlx5_free(sh->aso_age_mng);
		return -1;
	}
	rte_spinlock_init(&sh->aso_age_mng->resize_sl);
	rte_spinlock_init(&sh->aso_age_mng->free_sl);
	LIST_INIT(&sh->aso_age_mng->free);
	return 0;
}

/**
 * Close and release all the resources of the ASO aging management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	int i, j;

	mlx5_aso_queue_stop(sh);
	mlx5_aso_queue_uninit(sh);
	if (sh->aso_age_mng->pools) {
		struct mlx5_aso_age_pool *pool;

		for (i = 0; i < sh->aso_age_mng->next; ++i) {
			pool = sh->aso_age_mng->pools[i];
			claim_zero(mlx5_devx_cmd_destroy
						(pool->flow_hit_aso_obj));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
				if (pool->actions[j].dr_action)
					claim_zero
					    (mlx5_flow_os_destroy_flow_action
					      (pool->actions[j].dr_action));
			mlx5_free(pool);
		}
		mlx5_free(sh->aso_age_mng->pools);
	}
	mlx5_free(sh->aso_age_mng);
}

/**
 * Initialize the shared aging list information per port.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_aging_init(struct mlx5_dev_ctx_shared *sh)
{
	uint32_t i;
	struct mlx5_age_info *age_info;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		age_info->flags = 0;
		TAILQ_INIT(&age_info->aged_counters);
		LIST_INIT(&age_info->aged_aso);
		rte_spinlock_init(&age_info->aged_sl);
		MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	}
}

/**
 * Initialize the counters management structure.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to initialize.
 */
static void
mlx5_flow_counters_mng_init(struct mlx5_dev_ctx_shared *sh)
{
	int i;

	memset(&sh->cmng, 0, sizeof(sh->cmng));
	TAILQ_INIT(&sh->cmng.flow_counters);
	sh->cmng.min_id = MLX5_CNT_BATCH_OFFSET;
	sh->cmng.max_id = -1;
	sh->cmng.last_pool_idx = POOL_IDX_INVALID;
	rte_spinlock_init(&sh->cmng.pool_update_sl);
	for (i = 0; i < MLX5_COUNTER_TYPE_MAX; i++) {
		TAILQ_INIT(&sh->cmng.counters[i]);
		rte_spinlock_init(&sh->cmng.csl[i]);
	}
}

/**
 * Destroy all the resources allocated for a counter memory management.
 *
 * @param[in] mng
 *   Pointer to the memory management structure.
 */
static void
mlx5_flow_destroy_counter_stat_mem_mng(struct mlx5_counter_stats_mem_mng *mng)
{
	uint8_t *mem = (uint8_t *)(uintptr_t)mng->raws[0].data;

	LIST_REMOVE(mng, next);
	claim_zero(mlx5_devx_cmd_destroy(mng->dm));
	claim_zero(mlx5_os_umem_dereg(mng->umem));
	mlx5_free(mem);
}

/**
 * Close and release all the resources of the counters management.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
static void
mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_counter_stats_mem_mng *mng;
	int i, j;
	int retries = 1024;

	rte_errno = 0;
	while (--retries) {
		rte_eal_alarm_cancel(mlx5_flow_query_alarm, sh);
		if (rte_errno != EINPROGRESS)
			break;
		rte_pause();
	}

	if (sh->cmng.pools) {
		struct mlx5_flow_counter_pool *pool;
		uint16_t n_valid = sh->cmng.n_valid;
		bool fallback = sh->cmng.counter_fallback;

		for (i = 0; i < n_valid; ++i) {
			pool = sh->cmng.pools[i];
			if (!fallback && pool->min_dcs)
				claim_zero(mlx5_devx_cmd_destroy
							(pool->min_dcs));
			for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
				struct mlx5_flow_counter *cnt =
						MLX5_POOL_GET_CNT(pool, j);

				if (cnt->action)
					claim_zero
					 (mlx5_flow_os_destroy_flow_action
					  (cnt->action));
				if (fallback && MLX5_POOL_GET_CNT
				    (pool, j)->dcs_when_free)
					claim_zero(mlx5_devx_cmd_destroy
						   (cnt->dcs_when_free));
			}
			mlx5_free(pool);
		}
		mlx5_free(sh->cmng.pools);
	}
	mng = LIST_FIRST(&sh->cmng.mem_mngs);
	while (mng) {
		mlx5_flow_destroy_counter_stat_mem_mng(mng);
		mng = LIST_FIRST(&sh->cmng.mem_mngs);
	}
	memset(&sh->cmng, 0, sizeof(sh->cmng));
}

/* Send FLOW_AGED event if needed. */
void
mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh)
{
	struct mlx5_age_info *age_info;
	uint32_t i;

	for (i = 0; i < sh->max_port; i++) {
		age_info = &sh->port[i].age_info;
		if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
			continue;
		if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
			rte_eth_dev_callback_process
				(&rte_eth_devices[sh->port[i].devx_ih_port_id],
				 RTE_ETH_EVENT_FLOW_AGED, NULL);
		age_info->flags = 0;
	}
}

/**
 * Initialize the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 * @param[in] config
 *   Pointer to user dev config.
 */
static void
mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh,
		       const struct mlx5_dev_config *config)
{
	uint8_t i;
	struct mlx5_indexed_pool_config cfg;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i) {
		cfg = mlx5_ipool_cfg[i];
		switch (i) {
		default:
			break;
		/*
		 * Set MLX5_IPOOL_MLX5_FLOW ipool size
		 * according to PCI function flow configuration.
		 */
		case MLX5_IPOOL_MLX5_FLOW:
			cfg.size = config->dv_flow_en ?
				sizeof(struct mlx5_flow_handle) :
				MLX5_FLOW_HANDLE_VERBS_SIZE;
			break;
		}
		if (config->reclaim_mode)
			cfg.release_mem_en = 1;
		sh->ipool[i] = mlx5_ipool_create(&cfg);
	}
}

/**
 * Release the flow resources' indexed mempool.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object.
 */
static void
mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh)
{
	uint8_t i;

	for (i = 0; i < MLX5_IPOOL_MAX; ++i)
		mlx5_ipool_destroy(sh->ipool[i]);
}

/*
 * Check if dynamic flex parser for eCPRI already exists.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   true if it exists, false otherwise.
 */
bool
mlx5_flex_parser_ecpri_exist(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_profiles *prf =
		&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];

	return !!prf->obj;
}

/*
 * Allocation of a flex parser for eCPRI. Once created, the parser-related
 * resources will be held until the device is closed.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_profiles *prf =
		&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];
	struct mlx5_devx_graph_node_attr node = {
		.modify_field_select = 0,
	};
	uint32_t ids[8];
	int ret;

	if (!priv->config.hca_attr.parse_graph_flex_node) {
		DRV_LOG(ERR, "Dynamic flex parser is not supported "
			"for device %s.", priv->dev_data->name);
		return -ENOTSUP;
	}
	node.header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
	/* 8 bytes now: 4B common header + 4B message body header. */
	node.header_length_base_value = 0x8;
	/* After MAC layer: Ether / VLAN. */
	node.in[0].arc_parse_graph_node = MLX5_GRAPH_ARC_NODE_MAC;
	/* Type of compared condition should be 0xAEFE in the L2 layer. */
	node.in[0].compare_condition_value = RTE_ETHER_TYPE_ECPRI;
	/* Sample #0: type in common header. */
	node.sample[0].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[0].flow_match_sample_offset_mode = 0x0;
	/* Only the 2nd byte will be used. */
	node.sample[0].flow_match_sample_field_base_offset = 0x0;
	/* Sample #1: message payload. */
	node.sample[1].flow_match_sample_en = 1;
	/* Fixed offset. */
	node.sample[1].flow_match_sample_offset_mode = 0x0;
	/*
	 * Only the first two bytes will be used right now, and their offset
	 * starts after the common header, which is one DW (u32) long.
	 */
	node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
	prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
	if (!prf->obj) {
		DRV_LOG(ERR, "Failed to create flex parser node object.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->num = 2;
	ret = mlx5_devx_cmd_query_parse_samples(prf->obj, ids, prf->num);
	if (ret) {
		DRV_LOG(ERR, "Failed to query sample IDs.");
		return (rte_errno == 0) ? -ENODEV : -rte_errno;
	}
	prf->offset[0] = 0x0;
	prf->offset[1] = sizeof(uint32_t);
	prf->ids[0] = ids[0];
	prf->ids[1] = ids[1];
	return 0;
}

/*
 * Destroy the flex parser node, including the parser itself, input / output
 * arcs and DW samples. Resources could be reused then.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_profiles *prf =
		&priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0];

	if (prf->obj)
		mlx5_devx_cmd_destroy(prf->obj);
	prf->obj = NULL;
}

/*
 * Allocate Rx and Tx UARs in robust fashion.
 * This routine handles the following UAR allocation issues:
 *
 *  - tries to allocate the UAR with the most appropriate memory
 *    mapping type from the ones supported by the host
 *
 *  - tries to allocate the UAR with non-NULL base address.
 *    OFED 5.0.x and Upstream rdma_core before v29 returned NULL as
 *    the UAR base address if the UAR was not the first object in the UAR
 *    page. It caused the PMD failure and we should try to get another UAR
 *    till we get the first one with non-NULL base address returned.
 */
static int
mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
		     const struct mlx5_dev_config *config)
{
	uint32_t uar_mapping, retry;
	int err = 0;
	void *base_addr;

	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		/* Control the mapping type according to the settings. */
		uar_mapping = (config->dbnc == MLX5_TXDB_NCACHED) ?
			      MLX5DV_UAR_ALLOC_TYPE_NC :
			      MLX5DV_UAR_ALLOC_TYPE_BF;
#else
		RTE_SET_USED(config);
		/*
		 * It seems we have no way to control the memory mapping type
		 * for the UAR, the default "Write-Combining" type is assumed.
		 * The UAR initialization on queue creation queries the
		 * actual mapping type done by Verbs/kernel and setups the
		 * PMD datapath accordingly.
		 */
		uar_mapping = 0;
#endif
		sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!sh->tx_uar &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			if (config->dbnc == MLX5_TXDB_CACHED ||
			    config->dbnc == MLX5_TXDB_HEURISTIC)
				DRV_LOG(WARNING, "Devarg tx_db_nc setting "
						 "is not supported by DevX");
			/*
			 * In some environments like virtual machine
			 * the Write-Combining mapping might not be supported
			 * and UAR allocation fails. We try "Non-Cached"
			 * mapping for the case. The tx_burst routines take
			 * the UAR mapping type into account on UAR setup
			 * on queue creation.
			 */
			DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			sh->tx_uar = mlx5_glue->devx_alloc_uar
							(sh->ctx, uar_mapping);
		} else if (!sh->tx_uar &&
			   uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
			if (config->dbnc == MLX5_TXDB_NCACHED)
				DRV_LOG(WARNING, "Devarg tx_db_nc settings "
						 "is not supported by DevX");
			/*
			 * If Verbs/kernel does not support "Non-Cached"
			 * try the "Write-Combining".
			 */
			DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
			sh->tx_uar = mlx5_glue->devx_alloc_uar
							(sh->ctx, uar_mapping);
		}
#endif
		if (!sh->tx_uar) {
			DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (BF/NC)");
			err = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(sh->tx_uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, should be no memory/object leakage.
		 */
		DRV_LOG(DEBUG, "Retrying to allocate Tx DevX UAR");
		sh->tx_uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!sh->tx_uar) {
		DRV_LOG(ERR, "Failed to allocate Tx DevX UAR (NULL base)");
		err = ENOMEM;
		goto exit;
	}
	for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
		uar_mapping = 0;
		sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
							(sh->ctx, uar_mapping);
#ifdef MLX5DV_UAR_ALLOC_TYPE_NC
		if (!sh->devx_rx_uar &&
		    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
			/*
			 * Rx UAR is used to control interrupts only,
			 * should be no datapath noticeable impact,
			 * can try "Non-Cached" mapping safely.
			 */
			DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
			uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
			sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
							(sh->ctx, uar_mapping);
		}
#endif
		if (!sh->devx_rx_uar) {
			DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (BF/NC)");
			err = ENOMEM;
			goto exit;
		}
		base_addr = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
		if (base_addr)
			break;
		/*
		 * The UARs are allocated by rdma_core within the
		 * IB device context, on context closure all UARs
		 * will be freed, should be no memory/object leakage.
		 */
		DRV_LOG(DEBUG, "Retrying to allocate Rx DevX UAR");
		sh->devx_rx_uar = NULL;
	}
	/* Check whether we finally succeeded with valid UAR allocation. */
	if (!sh->devx_rx_uar) {
		DRV_LOG(ERR, "Failed to allocate Rx DevX UAR (NULL base)");
		err = ENOMEM;
	}
exit:
	return err;
}

/**
 * Allocate the shared device context. If there is a multiport device, the
 * master and representors will share this context; if there is a single
 * port dedicated device, the context will be used by only the given
 * port due to unification.
 *
 * The routine first searches for the context by the specified device name;
 * if found, the shared context is assumed and the reference counter is
 * incremented. If no context is found, a new one is created and initialized
 * with the specified device context and parameters.
 *
 * @param[in] spawn
 *   Pointer to the device attributes (name, port, etc).
 * @param[in] config
 *   Pointer to device configuration structure.
 *
 * @return
 *   Pointer to mlx5_dev_ctx_shared object on success,
 *   otherwise NULL and rte_errno is set.
 */
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
			  const struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh;
	int err = 0;
	uint32_t i;
	struct mlx5_devx_tis_attr tis_attr = { 0 };

	MLX5_ASSERT(spawn);
	/* Secondary process should not create the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
	/* Search for IB context by device name. */
	LIST_FOREACH(sh, &mlx5_dev_ctx_list, next) {
		if (!strcmp(sh->ibdev_name,
			    mlx5_os_get_dev_device_name(spawn->phys_dev))) {
			sh->refcnt++;
			goto exit;
		}
	}
	/* No device found, we have to create new shared context. */
	MLX5_ASSERT(spawn->max_port);
	sh = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
			 sizeof(struct mlx5_dev_ctx_shared) +
			 spawn->max_port *
			 sizeof(struct mlx5_dev_shared_port),
			 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
	if (!sh) {
		DRV_LOG(ERR, "shared context allocation failure");
		rte_errno = ENOMEM;
		goto exit;
	}
	err = mlx5_os_open_device(spawn, config, sh);
	if (!sh->ctx)
		goto error;
	err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
	if (err) {
		DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
		goto error;
	}
	sh->refcnt = 1;
	sh->bond_dev = UINT16_MAX;
	sh->max_port = spawn->max_port;
	strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
		sizeof(sh->ibdev_name) - 1);
	strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
		sizeof(sh->ibdev_path) - 1);
	/*
	 * Setting port_id to max unallowed value means
	 * there is no interrupt subhandler installed for
	 * the given port index i.
	 */
	for (i = 0; i < sh->max_port; i++) {
		sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
		sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
	}
	sh->pd = mlx5_os_alloc_pd(sh->ctx);
	if (sh->pd == NULL) {
		DRV_LOG(ERR, "PD allocation failure");
		err = ENOMEM;
		goto error;
	}
	if (sh->devx) {
		err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
		if (err) {
			DRV_LOG(ERR, "Fail to extract pdn from PD");
			goto error;
		}
		sh->td = mlx5_devx_cmd_create_td(sh->ctx);
		if (!sh->td) {
			DRV_LOG(ERR, "TD allocation failure");
			err = ENOMEM;
			goto error;
		}
		tis_attr.transport_domain = sh->td->id;
		sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
		if (!sh->tis) {
			DRV_LOG(ERR, "TIS allocation failure");
			err = ENOMEM;
			goto error;
		}
		err = mlx5_alloc_rxtx_uars(sh, config);
		if (err)
			goto error;
		MLX5_ASSERT(sh->tx_uar);
		MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->tx_uar));

		MLX5_ASSERT(sh->devx_rx_uar);
		MLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));
	}
#ifndef RTE_ARCH_64
	/* Initialize UAR access locks for 32bit implementations. */
	rte_spinlock_init(&sh->uar_lock_cq);
	for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
		rte_spinlock_init(&sh->uar_lock[i]);
#endif
	/*
	 * Once the device is added to the list of memory event
	 * callback, its global MR cache table cannot be expanded
	 * on the fly because of deadlock. If it overflows, lookup
	 * should be done by searching MR list linearly, which is slow.
	 *
	 * At this point the device is not added to the memory
	 * event list yet, context is just being created.
	 */
	err = mlx5_mr_btree_init(&sh->share_cache.cache,
				 MLX5_MR_BTREE_CACHE_N * 2,
				 spawn->pci_dev->device.numa_node);
	if (err) {
		err = rte_errno;
		goto error;
	}
	mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
			      &sh->share_cache.dereg_mr_cb);
	mlx5_os_dev_shared_handler_install(sh);
	sh->cnt_id_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_DWORD);
	if (!sh->cnt_id_tbl) {
		err = rte_errno;
		goto error;
	}
	if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
		err = mlx5_flow_os_init_workspace_once();
		if (err)
			goto error;
	}
	mlx5_flow_aging_init(sh);
	mlx5_flow_counters_mng_init(sh);
	mlx5_flow_ipool_create(sh, config);
	/* Add device to memory callback list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
			 sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Add context to the global device list. */
	LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
	rte_spinlock_init(&sh->geneve_tlv_opt_sl);
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	return sh;
error:
	pthread_mutex_destroy(&sh->txpp.mutex);
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	MLX5_ASSERT(sh);
	if (sh->cnt_id_tbl)
		mlx5_l3t_destroy(sh->cnt_id_tbl);
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->devx_rx_uar)
		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
	if (sh->tx_uar)
		mlx5_glue->devx_free_uar(sh->tx_uar);
	if (sh->pd)
		claim_zero(mlx5_os_dealloc_pd(sh->pd));
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	mlx5_free(sh);
	MLX5_ASSERT(err > 0);
	rte_errno = err;
	return NULL;
}

/**
 * Free the shared IB device context. Decrement the reference counter and,
 * if it reaches zero, free all allocated resources and close handles.
 *
 * @param[in] sh
 *   Pointer to mlx5_dev_ctx_shared object to free.
 */
void
mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
{
	pthread_mutex_lock(&mlx5_dev_ctx_list_mutex);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	/* Check the object presence in the list. */
	struct mlx5_dev_ctx_shared *lctx;

	LIST_FOREACH(lctx, &mlx5_dev_ctx_list, next)
		if (lctx == sh)
			break;
	MLX5_ASSERT(lctx);
	if (lctx != sh) {
		DRV_LOG(ERR, "Freeing non-existing shared IB context");
		goto exit;
	}
#endif
	MLX5_ASSERT(sh);
	MLX5_ASSERT(sh->refcnt);
	/* Secondary process should not free the shared context. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	if (--sh->refcnt)
		goto exit;
	/* Remove from memory callback device list. */
	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
	LIST_REMOVE(sh, mem_event_cb);
	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
	/* Release created Memory Regions. */
	mlx5_mr_release_cache(&sh->share_cache);
	/* Remove context from the global device list. */
	LIST_REMOVE(sh, next);
	/* Release flow workspaces objects on the last device. */
	if (LIST_EMPTY(&mlx5_dev_ctx_list))
		mlx5_flow_os_release_workspace();
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
	/*
	 * Ensure there is no async event handler installed.
	 * Only primary process handles async device events.
	 */
	mlx5_flow_counters_mng_close(sh);
	if (sh->aso_age_mng) {
		mlx5_flow_aso_age_mng_close(sh);
		sh->aso_age_mng = NULL;
	}
	mlx5_flow_ipool_destroy(sh);
	mlx5_os_dev_shared_handler_uninstall(sh);
	if (sh->cnt_id_tbl) {
		mlx5_l3t_destroy(sh->cnt_id_tbl);
		sh->cnt_id_tbl = NULL;
	}
	if (sh->tx_uar) {
		mlx5_glue->devx_free_uar(sh->tx_uar);
		sh->tx_uar = NULL;
	}
	if (sh->pd)
		claim_zero(mlx5_os_dealloc_pd(sh->pd));
	if (sh->tis)
		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
	if (sh->td)
		claim_zero(mlx5_devx_cmd_destroy(sh->td));
	if (sh->devx_rx_uar)
		mlx5_glue->devx_free_uar(sh->devx_rx_uar);
	if (sh->ctx)
		claim_zero(mlx5_glue->close_device(sh->ctx));
	MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
	pthread_mutex_destroy(&sh->txpp.mutex);
	mlx5_free(sh);
	return;
exit:
	pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
}

/**
 * Destroy table hash list.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 */
void
mlx5_free_table_hash_list(struct mlx5_priv *priv)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (!sh->flow_tbls)
		return;
	mlx5_hlist_destroy(sh->flow_tbls);
}

/**
 * Initialize flow table hash list and create the root tables entry
 * for each domain.
 *
 * @param[in] priv
 *   Pointer to the private device data structure.
 *
 * @return
 *   Zero on success, positive error code otherwise.
 */
int
mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
{
	int err = 0;
	/* Tables are only used in DV and DR modes. */
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	char s[MLX5_HLIST_NAMESIZE];

	MLX5_ASSERT(sh);
	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE,
					  0, 0, flow_dv_tbl_create_cb,
					  flow_dv_tbl_match_cb,
					  flow_dv_tbl_remove_cb);
	if (!sh->flow_tbls) {
		DRV_LOG(ERR, "flow tables with hash creation failed.");
		err = ENOMEM;
		return err;
	}
	sh->flow_tbls->ctx = sh;
#ifndef HAVE_MLX5DV_DR
	struct rte_flow_error error;
	struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id];

	/*
	 * In case we have no DR support, the zero tables should be created
	 * because DV expects to see them even if they cannot be created by
	 * RDMA-CORE.
	 */
	if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) ||
	    !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) {
		err = ENOMEM;
		goto error;
	}
	return err;
error:
	mlx5_free_table_hash_list(priv);
#endif /* HAVE_MLX5DV_DR */
#endif
	return err;
}

/**
 * Retrieve integer value from environment variable.
 *
 * @param[in] name
 *   Environment variable name.
 *
 * @return
 *   Integer value, 0 if the variable is not set.
 */
int
mlx5_getenv_int(const char *name)
{
	const char *val = getenv(name);

	if (val == NULL)
		return 0;
	return atoi(val);
}

/**
 * DPDK callback to add udp tunnel port
 *
 * @param[in] dev
 *   A pointer to eth_dev
 * @param[in] udp_tunnel
 *   A pointer to udp tunnel
 *
 * @return
 *   0 on valid udp ports and tunnels, -ENOTSUP otherwise.
 */
int
mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
			 struct rte_eth_udp_tunnel *udp_tunnel)
{
	MLX5_ASSERT(udp_tunnel != NULL);
	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
	    udp_tunnel->udp_port == 4789)
		return 0;
	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN_GPE &&
	    udp_tunnel->udp_port == 4790)
		return 0;
	return -ENOTSUP;
}

/**
 * Initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_proc_priv_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_proc_priv *ppriv;
	size_t ppriv_size;

	/*
	 * UAR register table follows the process private structure. BlueFlame
	 * registers for Tx queues are stored in the table.
	 */
	ppriv_size =
		sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
	ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
			    RTE_CACHE_LINE_SIZE, dev->device->numa_node);
	if (!ppriv) {
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	ppriv->uar_table_sz = priv->txqs_n;
	dev->process_private = ppriv;
	return 0;
}

/**
 * Un-initialize process private data structure.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
{
	if (!dev->process_private)
		return;
	mlx5_free(dev->process_private);
	dev->process_private = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * Destroy all queues and objects, free memory.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
int
mlx5_dev_close(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		/* Check if process_private released. */
		if (!dev->process_private)
			return 0;
		mlx5_tx_uar_uninit_secondary(dev);
		mlx5_proc_priv_uninit(dev);
		rte_eth_dev_release_port(dev);
		return 0;
	}
	if (!priv->sh)
		return 0;
	DRV_LOG(DEBUG, "port %u closing device \"%s\"",
		dev->data->port_id,
		((priv->sh->ctx != NULL) ?
		mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
	/*
	 * If default mreg copy action is removed at the stop stage,
	 * the search will return none and nothing will be done anymore.
	 */
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	/*
	 * If all the flows are already flushed in the device stop stage,
	 * then this will return directly without any action.
	 */
	mlx5_flow_list_flush(dev, &priv->flows, true);
	mlx5_shared_action_flush(dev);
	mlx5_flow_meter_flush(dev, NULL);
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	/* Free the eCPRI flex parser resource. */
	mlx5_flex_parser_ecpri_release(dev);
	if (priv->rxqs != NULL) {
		/* XXX race condition if mlx5_rx_burst() is still running. */
		rte_delay_us_sleep(1000);
		for (i = 0; (i != priv->rxqs_n); ++i)
			mlx5_rxq_release(dev, i);
		priv->rxqs_n = 0;
		priv->rxqs = NULL;
	}
	if (priv->txqs != NULL) {
		/* XXX race condition if mlx5_tx_burst() is still running. */
		rte_delay_us_sleep(1000);
		for (i = 0; (i != priv->txqs_n); ++i)
			mlx5_txq_release(dev, i);
		priv->txqs_n = 0;
		priv->txqs = NULL;
	}
	mlx5_proc_priv_uninit(dev);
	if (priv->q_counters) {
		mlx5_devx_cmd_destroy(priv->q_counters);
		priv->q_counters = NULL;
	}
	if (priv->drop_queue.hrxq)
		mlx5_drop_action_destroy(dev);
	if (priv->mreg_cp_tbl)
		mlx5_hlist_destroy(priv->mreg_cp_tbl);
	mlx5_mprq_free_mp(dev);
	mlx5_os_free_shared_dr(priv);
	if (priv->rss_conf.rss_key != NULL)
		mlx5_free(priv->rss_conf.rss_key);
	if (priv->reta_idx != NULL)
		mlx5_free(priv->reta_idx);
	if (priv->config.vf)
		mlx5_os_mac_addr_flush(dev);
	if (priv->nl_socket_route >= 0)
		close(priv->nl_socket_route);
	if (priv->nl_socket_rdma >= 0)
		close(priv->nl_socket_rdma);
	if (priv->vmwa_context)
		mlx5_vlan_vmwa_exit(priv->vmwa_context);
	ret = mlx5_hrxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some indirection table still remain",
			dev->data->port_id);
	ret = mlx5_rxq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queue objects still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_txq_obj_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
			dev->data->port_id);
	ret = mlx5_txq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Tx queues still remain",
			dev->data->port_id);
	ret = mlx5_flow_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some flows still remain",
			dev->data->port_id);
	mlx5_cache_list_destroy(&priv->hrxqs);
	/*
	 * Free the shared context in last turn, because the cleanup
	 * routines above may use some shared fields, like
	 * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
	 * the ifindex if Netlink fails.
	 */
	mlx5_free_shared_dev_ctx(priv->sh);
	if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
		unsigned int c = 0;
		uint16_t port_id;

		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
			struct mlx5_priv *opriv =
				rte_eth_devices[port_id].data->dev_private;

			if (!opriv ||
			    opriv->domain_id != priv->domain_id ||
			    &rte_eth_devices[port_id] == dev)
				continue;
			++c;
			break;
		}
		if (!c)
			claim_zero(rte_eth_switch_domain_free(priv->domain_id));
	}
	memset(priv, 0, sizeof(*priv));
	priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
	/*
	 * Reset mac_addrs to NULL such that it is not freed as part of
	 * rte_eth_dev_release_port(). mac_addrs is part of dev_private so
	 * it is freed when dev_private is freed.
	 */
	dev->data->mac_addrs = NULL;
	return 0;
}

const struct eth_dev_ops mlx5_dev_ops = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.reta_update = mlx5_dev_rss_reta_update,
	.reta_query = mlx5_dev_rss_reta_query,
	.rss_hash_update = mlx5_rss_hash_update,
	.rss_hash_conf_get = mlx5_rss_hash_conf_get,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
	.hairpin_bind = mlx5_hairpin_bind,
	.hairpin_unbind = mlx5_hairpin_unbind,
	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
};

/* Available operations from secondary process. */
const struct eth_dev_ops mlx5_dev_sec_ops = {
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
};

/* Available operations in flow isolated mode. */
const struct eth_dev_ops mlx5_dev_ops_isolate = {
	.dev_configure = mlx5_dev_configure,
	.dev_start = mlx5_dev_start,
	.dev_stop = mlx5_dev_stop,
	.dev_set_link_down = mlx5_set_link_down,
	.dev_set_link_up = mlx5_set_link_up,
	.dev_close = mlx5_dev_close,
	.promiscuous_enable = mlx5_promiscuous_enable,
	.promiscuous_disable = mlx5_promiscuous_disable,
	.allmulticast_enable = mlx5_allmulticast_enable,
	.allmulticast_disable = mlx5_allmulticast_disable,
	.link_update = mlx5_link_update,
	.stats_get = mlx5_stats_get,
	.stats_reset = mlx5_stats_reset,
	.xstats_get = mlx5_xstats_get,
	.xstats_reset = mlx5_xstats_reset,
	.xstats_get_names = mlx5_xstats_get_names,
	.fw_version_get = mlx5_fw_version_get,
	.dev_infos_get = mlx5_dev_infos_get,
	.read_clock = mlx5_txpp_read_clock,
	.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
	.vlan_filter_set = mlx5_vlan_filter_set,
	.rx_queue_setup = mlx5_rx_queue_setup,
	.rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
	.tx_queue_setup = mlx5_tx_queue_setup,
	.tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
	.rx_queue_release = mlx5_rx_queue_release,
	.tx_queue_release = mlx5_tx_queue_release,
	.rx_queue_start = mlx5_rx_queue_start,
	.rx_queue_stop = mlx5_rx_queue_stop,
	.tx_queue_start = mlx5_tx_queue_start,
	.tx_queue_stop = mlx5_tx_queue_stop,
	.flow_ctrl_get = mlx5_dev_get_flow_ctrl,
	.flow_ctrl_set = mlx5_dev_set_flow_ctrl,
	.mac_addr_remove = mlx5_mac_addr_remove,
	.mac_addr_add = mlx5_mac_addr_add,
	.mac_addr_set = mlx5_mac_addr_set,
	.set_mc_addr_list = mlx5_set_mc_addr_list,
	.mtu_set = mlx5_dev_set_mtu,
	.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
	.vlan_offload_set = mlx5_vlan_offload_set,
	.filter_ctrl = mlx5_dev_filter_ctrl,
	.rxq_info_get = mlx5_rxq_info_get,
	.txq_info_get = mlx5_txq_info_get,
	.rx_burst_mode_get = mlx5_rx_burst_mode_get,
	.tx_burst_mode_get = mlx5_tx_burst_mode_get,
	.rx_queue_intr_enable = mlx5_rx_intr_enable,
	.rx_queue_intr_disable = mlx5_rx_intr_disable,
	.is_removed = mlx5_is_removed,
	.get_module_info = mlx5_get_module_info,
	.get_module_eeprom = mlx5_get_module_eeprom,
	.hairpin_cap_get = mlx5_hairpin_cap_get,
	.mtr_ops_get = mlx5_flow_meter_ops_get,
	.hairpin_bind = mlx5_hairpin_bind,
	.hairpin_unbind = mlx5_hairpin_unbind,
	.hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
	.hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
	.hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
	.hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
};

/**
 * Verify and store value for device argument.
 *
 * @param[in] key
 *   Key argument to verify.
 * @param[in] val
 *   Value associated with key.
 * @param opaque
 *   User data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
	struct mlx5_dev_config *config = opaque;
	unsigned long mod;
	signed long tmp;

	/* No-op, port representors are processed in mlx5_dev_spawn(). */
	if (!strcmp(MLX5_REPRESENTOR, key))
		return 0;
	errno = 0;
	tmp = strtol(val, NULL, 0);
	if (errno) {
		rte_errno = errno;
		DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
		return -rte_errno;
	}
	if (tmp < 0 && strcmp(MLX5_TX_PP, key) && strcmp(MLX5_TX_SKEW, key)) {
		/* Negative values are acceptable for some keys only. */
		rte_errno = EINVAL;
		DRV_LOG(WARNING, "%s: invalid negative value \"%s\"", key, val);
		return -rte_errno;
	}
	mod = tmp >= 0 ? tmp : -tmp;
	if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
		if (tmp > MLX5_CQE_RESP_FORMAT_L34H_STRIDX) {
			DRV_LOG(ERR, "invalid CQE compression "
				     "format parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->cqe_comp = !!tmp;
		config->cqe_comp_fmt = tmp;
	} else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
		config->hw_padding = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
		config->mprq.enabled = !!tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
		config->mprq.stride_num_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_SIZE, key) == 0) {
		config->mprq.stride_size_n = tmp;
	} else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
		config->mprq.max_memcpy_len = tmp;
	} else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
		config->mprq.min_rxqs_num = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_max", key);
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MAX, key) == 0) {
		config->txq_inline_max = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MIN, key) == 0) {
		config->txq_inline_min = tmp;
	} else if (strcmp(MLX5_TXQ_INLINE_MPW, key) == 0) {
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
		config->txqs_inline = tmp;
	} else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
		config->mps = !!tmp;
	} else if (strcmp(MLX5_TX_DB_NC, key) == 0) {
		if (tmp != MLX5_TXDB_CACHED &&
		    tmp != MLX5_TXDB_NCACHED &&
		    tmp != MLX5_TXDB_HEURISTIC) {
			DRV_LOG(ERR, "invalid Tx doorbell "
				     "mapping parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->dbnc = tmp;
	} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter,"
				 " converted to txq_inline_mpw", key);
		config->txq_inline_mpw = tmp;
	} else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
		DRV_LOG(WARNING, "%s: deprecated parameter, ignored", key);
	} else if (strcmp(MLX5_TX_PP, key) == 0) {
		if (!mod) {
			DRV_LOG(ERR, "Zero Tx packet pacing parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->tx_pp = tmp;
	} else if (strcmp(MLX5_TX_SKEW, key) == 0) {
		config->tx_skew = tmp;
	} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
		config->rx_vec_en = !!tmp;
	} else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
		config->l3_vxlan_en = !!tmp;
	} else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
		config->vf_nl_en = !!tmp;
	} else if (strcmp(MLX5_DV_ESW_EN, key) == 0) {
		config->dv_esw_en = !!tmp;
	} else if (strcmp(MLX5_DV_FLOW_EN, key) == 0) {
		config->dv_flow_en = !!tmp;
	} else if (strcmp(MLX5_DV_XMETA_EN, key) == 0) {
		if (tmp != MLX5_XMETA_MODE_LEGACY &&
		    tmp != MLX5_XMETA_MODE_META16 &&
		    tmp != MLX5_XMETA_MODE_META32 &&
		    tmp != MLX5_XMETA_MODE_MISS_INFO) {
			DRV_LOG(ERR, "invalid extensive "
				     "metadata parameter");
			rte_errno = EINVAL;
			return -rte_errno;
		}
		if (tmp != MLX5_XMETA_MODE_MISS_INFO)
			config->dv_xmeta_en = tmp;
		else
			config->dv_miss_info = 1;
	} else if (strcmp(MLX5_LACP_BY_USER, key) == 0) {
		config->lacp_by_user = !!tmp;
	} else if (strcmp(MLX5_MR_EXT_MEMSEG_EN, key) == 0) {
		config->mr_ext_memseg_en = !!tmp;
	} else if (strcmp(MLX5_MAX_DUMP_FILES_NUM, key) == 0) {
		config->max_dump_files_num = tmp;
	} else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) {
		config->lro.timeout = tmp;
	} else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) {
		DRV_LOG(DEBUG, "class argument is %s.", val);
	} else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) {
		config->log_hp_size = tmp;
	} else if (strcmp(MLX5_RECLAIM_MEM, key) == 0) {
		if (tmp != MLX5_RCM_NONE &&
		    tmp != MLX5_RCM_LIGHT &&
		    tmp != MLX5_RCM_AGGR) {
			DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		config->reclaim_mode = tmp;
	} else if (strcmp(MLX5_SYS_MEM_EN, key) == 0) {
		config->sys_mem_en = !!tmp;
	} else if (strcmp(MLX5_DECAP_EN, key) == 0) {
		config->decap_en = !!tmp;
	} else {
		DRV_LOG(WARNING, "%s: unknown parameter", key);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
}
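
/*
 * Illustrative example (not part of the original source): with devargs
 * "txq_inline_max=204,mprq_en=1", mlx5_args_check() above is called once
 * per key and sets config->txq_inline_max = 204 and config->mprq.enabled = 1;
 * an unknown key fails with rte_errno set to EINVAL.
 */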

/**
 * Parse device parameters.
 *
 * @param config
 *   Pointer to device configuration structure.
 * @param devargs
 *   Device arguments structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
	const char **params = (const char *[]){
		MLX5_RXQ_CQE_COMP_EN,
		MLX5_RXQ_PKT_PAD_EN,
		MLX5_RX_MPRQ_EN,
		MLX5_RX_MPRQ_LOG_STRIDE_NUM,
		MLX5_RX_MPRQ_LOG_STRIDE_SIZE,
		MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
		MLX5_RXQS_MIN_MPRQ,
		MLX5_TXQ_INLINE,
		MLX5_TXQ_INLINE_MIN,
		MLX5_TXQ_INLINE_MAX,
		MLX5_TXQ_INLINE_MPW,
		MLX5_TXQS_MIN_INLINE,
		MLX5_TXQS_MAX_VEC,
		MLX5_TXQ_MPW_EN,
		MLX5_TXQ_MPW_HDR_DSEG_EN,
		MLX5_TXQ_MAX_INLINE_LEN,
		MLX5_TX_DB_NC,
		MLX5_TX_PP,
		MLX5_TX_SKEW,
		MLX5_TX_VEC_EN,
		MLX5_RX_VEC_EN,
		MLX5_L3_VXLAN_EN,
		MLX5_VF_NL_EN,
		MLX5_DV_ESW_EN,
		MLX5_DV_FLOW_EN,
		MLX5_DV_XMETA_EN,
		MLX5_LACP_BY_USER,
		MLX5_MR_EXT_MEMSEG_EN,
		MLX5_REPRESENTOR,
		MLX5_MAX_DUMP_FILES_NUM,
		MLX5_LRO_TIMEOUT_USEC,
		MLX5_CLASS_ARG_NAME,
		MLX5_HP_BUF_SIZE,
		MLX5_RECLAIM_MEM,
		MLX5_SYS_MEM_EN,
		MLX5_DECAP_EN,
		NULL,
	};
	struct rte_kvargs *kvlist;
	int ret = 0;
	int i;

	if (devargs == NULL)
		return 0;
	/* Following UGLY cast is done to pass checkpatch. */
	kvlist = rte_kvargs_parse(devargs->args, params);
	if (kvlist == NULL) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Process parameters. */
	for (i = 0; (params[i] != NULL); ++i) {
		if (rte_kvargs_count(kvlist, params[i])) {
			ret = rte_kvargs_process(kvlist, params[i],
						 mlx5_args_check, config);
			if (ret) {
				rte_errno = EINVAL;
				rte_kvargs_free(kvlist);
				return -rte_errno;
			}
		}
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/**
 * Configures the minimal amount of data to inline into WQE
 * while sending packets.
 *
 * - txq_inline_min has the highest priority if this
 *   key is specified in devargs
 * - if DevX is enabled the inline mode is queried from the
 *   device (HCA attributes and NIC vport context if needed).
 * - otherwise L2 mode (18 bytes) is assumed for ConnectX-4/4 Lx
 *   and none (0 bytes) for other NICs
 *
 * @param spawn
 *   Verbs device parameters (name, port, switch_info) to spawn.
 * @param config
 *   Device configuration parameters.
 */
void
mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn,
		    struct mlx5_dev_config *config)
{
	if (config->txq_inline_min != MLX5_ARG_UNSET) {
		/* Application defines size of inlined data explicitly. */
		switch (spawn->pci_dev->id.device_id) {
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
		case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
			if (config->txq_inline_min <
				    (int)MLX5_INLINE_HSIZE_L2) {
				DRV_LOG(DEBUG,
					"txq_inline_min aligned to minimal"
					" ConnectX-4 required value %d",
					(int)MLX5_INLINE_HSIZE_L2);
				config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			}
			break;
		}
		goto exit;
	}
	if (config->hca_attr.eth_net_offloads) {
		/* We have DevX enabled, inline mode queried successfully. */
		switch (config->hca_attr.wqe_inline_mode) {
		case MLX5_CAP_INLINE_MODE_L2:
			/* outer L2 header must be inlined. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
			goto exit;
		case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
			/* No inline data are required by the NIC. */
			config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
			config->hw_vlan_insert =
				config->hca_attr.wqe_vlan_insert;
			DRV_LOG(DEBUG, "Tx VLAN insertion is supported");
			goto exit;
		case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
			/* Inline mode is defined by NIC vport context. */
			if (!config->hca_attr.eth_virt)
				break;
			switch (config->hca_attr.vport_inline_mode) {
			case MLX5_INLINE_MODE_NONE:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_NONE;
				goto exit;
			case MLX5_INLINE_MODE_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L2;
				goto exit;
			case MLX5_INLINE_MODE_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L3;
				goto exit;
			case MLX5_INLINE_MODE_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_L4;
				goto exit;
			case MLX5_INLINE_MODE_INNER_L2:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L2;
				goto exit;
			case MLX5_INLINE_MODE_INNER_IP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L3;
				goto exit;
			case MLX5_INLINE_MODE_INNER_TCP_UDP:
				config->txq_inline_min =
					MLX5_INLINE_HSIZE_INNER_L4;
				goto exit;
			}
		}
	}
	/*
	 * We get here if we are unable to deduce
	 * inline data size with DevX. Try the PCI ID
	 * to recognize older NICs.
	 */
	switch (spawn->pci_dev->id.device_id) {
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
		config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
		config->hw_vlan_insert = 0;
		break;
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
	case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
		/*
		 * These NICs support VLAN insertion from WQE and
		 * report the wqe_vlan_insert flag. However, there is
		 * a bug that may break PFC control, so the feature
		 * is disabled.
		 */
		config->hw_vlan_insert = 0;
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	default:
		config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
		break;
	}
exit:
	DRV_LOG(DEBUG, "min tx inline configured: %d", config->txq_inline_min);
}
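
/*
 * Illustrative summary of the precedence implemented above (values are
 * the MLX5_INLINE_HSIZE_* constants already used in this function):
 *
 *	txq_inline_min set in devargs    -> used as is (raised to L2 on ConnectX-4)
 *	DevX inline mode L2              -> MLX5_INLINE_HSIZE_L2
 *	DevX inline mode NOT_REQUIRED    -> MLX5_INLINE_HSIZE_NONE
 *	DevX vport context NONE..INNER_* -> matching MLX5_INLINE_HSIZE_* value
 *	no DevX, ConnectX-4/4 Lx PCI ID  -> MLX5_INLINE_HSIZE_L2
 *	no DevX, other PCI IDs           -> MLX5_INLINE_HSIZE_NONE
 */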

/**
 * Configures the metadata mask fields in the shared context.
 *
 * @param [in] dev
 *   Pointer to Ethernet device.
 */
void
mlx5_set_metadata_mask(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	uint32_t meta, mark, reg_c0;

	reg_c0 = ~priv->vport_meta_mask;
	switch (priv->config.dv_xmeta_en) {
	case MLX5_XMETA_MODE_LEGACY:
		meta = UINT32_MAX;
		mark = MLX5_FLOW_MARK_MASK;
		break;
	case MLX5_XMETA_MODE_META16:
		meta = reg_c0 >> rte_bsf32(reg_c0);
		mark = MLX5_FLOW_MARK_MASK;
		break;
	case MLX5_XMETA_MODE_META32:
		meta = UINT32_MAX;
		mark = (reg_c0 >> rte_bsf32(reg_c0)) & MLX5_FLOW_MARK_MASK;
		break;
	default:
		meta = 0;
		mark = 0;
		MLX5_ASSERT(false);
		break;
	}
	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
		DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
			sh->dv_mark_mask, mark);
	else
		sh->dv_mark_mask = mark;
	if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
		DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
			sh->dv_meta_mask, meta);
	else
		sh->dv_meta_mask = meta;
	if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
		DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
			sh->dv_regc0_mask, reg_c0);
	else
		sh->dv_regc0_mask = reg_c0;
	DRV_LOG(DEBUG, "metadata mode %u", priv->config.dv_xmeta_en);
	DRV_LOG(DEBUG, "metadata MARK mask %08X", sh->dv_mark_mask);
	DRV_LOG(DEBUG, "metadata META mask %08X", sh->dv_meta_mask);
	DRV_LOG(DEBUG, "metadata reg_c0 mask %08X", sh->dv_regc0_mask);
}

int
rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n)
{
	static const char *const dynf_names[] = {
		RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
		RTE_MBUF_DYNFLAG_METADATA_NAME,
		RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME
	};
	unsigned int i;

	if (n < RTE_DIM(dynf_names))
		return -ENOMEM;
	for (i = 0; i < RTE_DIM(dynf_names); i++) {
		if (names[i] == NULL)
			return -EINVAL;
		strcpy(names[i], dynf_names[i]);
	}
	return RTE_DIM(dynf_names);
}
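
/*
 * Illustrative application-side sketch (not part of the driver) of how the
 * export above is meant to be consumed; sizing the buffers with
 * RTE_MBUF_DYN_NAMESIZE is an assumption about the caller, not something
 * this function requires:
 *
 *	char buf[3][RTE_MBUF_DYN_NAMESIZE];
 *	char *names[3] = { buf[0], buf[1], buf[2] };
 *	int nb = rte_pmd_mlx5_get_dyn_flag_names(names, 3);
 *
 * A negative return value (-ENOMEM or -EINVAL) indicates a too small array
 * or a NULL entry, matching the checks in the function body above.
 */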

/**
 * Check sibling device configurations.
 *
 * Sibling devices sharing the Infiniband device context should have
 * compatible configurations. This regards representors and bonding devices.
 *
 * @param priv
 *   Private device descriptor.
 * @param config
 *   Configuration of the device to be spawned.
 *
 * @return
 *   0 on success, EINVAL otherwise and rte_errno is set.
 */
int
mlx5_dev_check_sibling_config(struct mlx5_priv *priv,
			      struct mlx5_dev_config *config)
{
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_dev_config *sh_conf = NULL;
	uint16_t port_id;

	MLX5_ASSERT(sh);
	/* Nothing to compare for the single/first device. */
	if (sh->refcnt == 1)
		return 0;
	/* Find the device with shared context. */
	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
		struct mlx5_priv *opriv =
			rte_eth_devices[port_id].data->dev_private;

		if (opriv && opriv != priv && opriv->sh == sh) {
			sh_conf = &opriv->config;
			break;
		}
	}
	if (!sh_conf)
		return 0;
	if (sh_conf->dv_flow_en ^ config->dv_flow_en) {
		DRV_LOG(ERR, "\"dv_flow_en\" configuration mismatch"
			     " for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	if (sh_conf->dv_xmeta_en ^ config->dv_xmeta_en) {
		DRV_LOG(ERR, "\"dv_xmeta_en\" configuration mismatch"
			     " for shared %s context", sh->ibdev_name);
		rte_errno = EINVAL;
		return rte_errno;
	}
	return 0;
}

/**
 * Look for the Ethernet device belonging to the mlx5 driver.
 *
 * @param[in] port_id
 *   port_id to start looking for the device.
 * @param[in] pci_dev
 *   Pointer to the hint PCI device. While a device is being probed,
 *   its siblings (master and preceding representors) might not have been
 *   assigned a driver yet (mlx5_os_pci_probe() has not completed for them);
 *   in that case a match on the hint PCI device is used to detect the
 *   sibling devices.
 *
 * @return
 *   port_id of the found device, RTE_MAX_ETHPORTS if not found.
 */
uint16_t
mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
{
	while (port_id < RTE_MAX_ETHPORTS) {
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		if (dev->state != RTE_ETH_DEV_UNUSED &&
		    dev->device &&
		    (dev->device == &pci_dev->device ||
		     (dev->device->driver &&
		      dev->device->driver->name &&
		      !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME))))
			break;
		port_id++;
	}
	if (port_id >= RTE_MAX_ETHPORTS)
		return RTE_MAX_ETHPORTS;
	return port_id;
}
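
/*
 * Illustrative iteration sketch (an assumption about intended usage, in the
 * spirit of the MLX5_ETH_FOREACH_DEV() loop seen earlier in this file):
 *
 *	uint16_t port_id;
 *
 *	for (port_id = mlx5_eth_find_next(0, pci_dev);
 *	     port_id < RTE_MAX_ETHPORTS;
 *	     port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
 *		handle_mlx5_port(port_id);   <-- hypothetical per-port callback
 */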

/**
 * DPDK callback to remove a PCI device.
 *
 * This function removes all Ethernet devices belonging to a given PCI device.
 *
 * @param[in] pci_dev
 *   Pointer to the PCI device.
 *
 * @return
 *   0 on success, -EIO if closing any of the Ethernet devices failed.
 */
static int
mlx5_pci_remove(struct rte_pci_device *pci_dev)
{
	uint16_t port_id;
	int ret = 0;

	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) {
		/*
		 * mlx5_dev_close() is not registered for secondary
		 * processes, call the close function explicitly there.
		 */
		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
			ret |= mlx5_dev_close(&rte_eth_devices[port_id]);
		else
			ret |= rte_eth_dev_close(port_id);
	}
	return ret == 0 ? 0 : -EIO;
}

static const struct rte_pci_id mlx5_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTXVF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6DXBF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX6LX)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
			       PCI_DEVICE_ID_MELLANOX_CONNECTX7BF)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_pci_driver mlx5_driver = {
	.driver_class = MLX5_CLASS_NET,
	.pci_driver = {
		.driver = {
			.name = MLX5_DRIVER_NAME,
		},
		.id_table = mlx5_pci_id_map,
		.probe = mlx5_os_pci_probe,
		.remove = mlx5_pci_remove,
		.dma_map = mlx5_dma_map,
		.dma_unmap = mlx5_dma_unmap,
		.drv_flags = PCI_DRV_FLAGS,
	},
};

/* Initialize driver log type. */
RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)

/**
 * Driver initialization routine.
 */
RTE_INIT(rte_mlx5_pmd_init)
{
	pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
	mlx5_common_init();
	/* Build the static tables for Verbs conversion. */
	mlx5_set_ptype_table();
	mlx5_set_cksum_table();
	mlx5_set_swp_types_table();
	if (mlx5_glue)
		mlx5_pci_driver_register(&mlx5_driver);
}

RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
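
/*
 * Note on the kernel module dependency declared above: ib_uverbs, mlx5_core
 * and mlx5_ib must be loaded before the PMD can probe any device listed in
 * mlx5_pci_id_map, e.g. (illustrative shell commands, not part of the
 * driver):
 *
 *	modprobe mlx5_core
 *	modprobe mlx5_ib
 *	modprobe ib_uverbs
 */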