1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 */ 4 5 #include <errno.h> 6 #include <stdalign.h> 7 #include <stddef.h> 8 #include <stdint.h> 9 #include <stdlib.h> 10 11 #include <rte_windows.h> 12 #include <ethdev_pci.h> 13 14 #include <mlx5_glue.h> 15 #include <mlx5_devx_cmds.h> 16 #include <mlx5_common.h> 17 #include <mlx5_common_mp.h> 18 #include <mlx5_common_mr.h> 19 #include <mlx5_malloc.h> 20 21 #include "mlx5_defs.h" 22 #include "mlx5.h" 23 #include "mlx5_common_os.h" 24 #include "mlx5_utils.h" 25 #include "mlx5_rxtx.h" 26 #include "mlx5_rx.h" 27 #include "mlx5_tx.h" 28 #include "mlx5_autoconf.h" 29 #include "mlx5_mr.h" 30 #include "mlx5_flow.h" 31 #include "mlx5_devx.h" 32 33 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 34 35 /* Spinlock for mlx5_shared_data allocation. */ 36 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 37 38 /* rte flow indexed pool configuration. */ 39 static struct mlx5_indexed_pool_config icfg[] = { 40 { 41 .size = sizeof(struct rte_flow), 42 .trunk_size = 64, 43 .need_lock = 1, 44 .release_mem_en = 0, 45 .malloc = mlx5_malloc, 46 .free = mlx5_free, 47 .per_core_cache = 0, 48 .type = "ctl_flow_ipool", 49 }, 50 { 51 .size = sizeof(struct rte_flow), 52 .trunk_size = 64, 53 .grow_trunk = 3, 54 .grow_shift = 2, 55 .need_lock = 1, 56 .release_mem_en = 0, 57 .malloc = mlx5_malloc, 58 .free = mlx5_free, 59 .per_core_cache = 1 << 14, 60 .type = "rte_flow_ipool", 61 }, 62 { 63 .size = sizeof(struct rte_flow), 64 .trunk_size = 64, 65 .grow_trunk = 3, 66 .grow_shift = 2, 67 .need_lock = 1, 68 .release_mem_en = 0, 69 .malloc = mlx5_malloc, 70 .free = mlx5_free, 71 .per_core_cache = 0, 72 .type = "mcp_flow_ipool", 73 }, 74 }; 75 76 /** 77 * Initialize shared data between primary and secondary process. 78 * 79 * A memzone is reserved by primary process and secondary processes attach to 80 * the memzone. 81 * 82 * @return 83 * 0 on success, a negative errno value otherwise and rte_errno is set. 84 */ 85 static int 86 mlx5_init_shared_data(void) 87 { 88 const struct rte_memzone *mz; 89 int ret = 0; 90 91 rte_spinlock_lock(&mlx5_shared_data_lock); 92 if (mlx5_shared_data == NULL) { 93 /* Allocate shared memory. */ 94 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 95 sizeof(*mlx5_shared_data), 96 SOCKET_ID_ANY, 0); 97 if (mz == NULL) { 98 DRV_LOG(ERR, 99 "Cannot allocate mlx5 shared data"); 100 ret = -rte_errno; 101 goto error; 102 } 103 mlx5_shared_data = mz->addr; 104 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 105 rte_spinlock_init(&mlx5_shared_data->lock); 106 } 107 error: 108 rte_spinlock_unlock(&mlx5_shared_data_lock); 109 return ret; 110 } 111 112 /** 113 * PMD global initialization. 114 * 115 * Independent from individual device, this function initializes global 116 * per-PMD data structures distinguishing primary and secondary processes. 117 * Hence, each initialization is called once per a process. 118 * 119 * @return 120 * 0 on success, a negative errno value otherwise and rte_errno is set. 121 */ 122 static int 123 mlx5_init_once(void) 124 { 125 if (mlx5_init_shared_data()) 126 return -rte_errno; 127 return 0; 128 } 129 130 /** 131 * Get mlx5 device attributes. 132 * 133 * @param ctx 134 * Pointer to device context. 135 * 136 * @param device_attr 137 * Pointer to mlx5 device attributes. 138 * 139 * @return 140 * 0 on success, non zero error number otherwise 141 */ 142 int 143 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr) 144 { 145 struct mlx5_context *mlx5_ctx; 146 struct mlx5_hca_attr hca_attr; 147 void *pv_iseg = NULL; 148 u32 cb_iseg = 0; 149 int err = 0; 150 151 if (!ctx) 152 return -EINVAL; 153 mlx5_ctx = (struct mlx5_context *)ctx; 154 memset(device_attr, 0, sizeof(*device_attr)); 155 err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr); 156 if (err) { 157 DRV_LOG(ERR, "Failed to get device hca_cap"); 158 return err; 159 } 160 device_attr->max_cq = 1 << hca_attr.log_max_cq; 161 device_attr->max_qp = 1 << hca_attr.log_max_qp; 162 device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz; 163 device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz; 164 device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz; 165 device_attr->max_pd = 1 << hca_attr.log_max_pd; 166 device_attr->max_srq = 1 << hca_attr.log_max_srq; 167 device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz; 168 if (hca_attr.rss_ind_tbl_cap) { 169 device_attr->max_rwq_indirection_table_size = 170 1 << hca_attr.rss_ind_tbl_cap; 171 } 172 pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg); 173 if (pv_iseg == NULL) { 174 DRV_LOG(ERR, "Failed to get device hca_iseg"); 175 return errno; 176 } 177 if (!err) { 178 snprintf(device_attr->fw_ver, 64, "%x.%x.%04x", 179 MLX5_GET(initial_seg, pv_iseg, fw_rev_major), 180 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor), 181 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor)); 182 } 183 return err; 184 } 185 186 /** 187 * Initialize DR related data within private structure. 188 * Routine checks the reference counter and does actual 189 * resources creation/initialization only if counter is zero. 190 * 191 * @param[in] priv 192 * Pointer to the private device data structure. 193 * 194 * @return 195 * Zero on success, positive error code otherwise. 196 */ 197 static int 198 mlx5_alloc_shared_dr(struct mlx5_priv *priv) 199 { 200 struct mlx5_dev_ctx_shared *sh = priv->sh; 201 int err = 0; 202 203 if (!sh->flow_tbls) 204 err = mlx5_alloc_table_hash_list(priv); 205 else 206 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse", 207 (void *)sh->flow_tbls); 208 return err; 209 } 210 /** 211 * Destroy DR related data within private structure. 212 * 213 * @param[in] priv 214 * Pointer to the private device data structure. 215 */ 216 void 217 mlx5_os_free_shared_dr(struct mlx5_priv *priv) 218 { 219 mlx5_free_table_hash_list(priv); 220 } 221 222 /** 223 * Set the completion channel file descriptor interrupt as non-blocking. 224 * Currently it has no support under Windows. 225 * 226 * @param[in] rxq_obj 227 * Pointer to RQ channel object, which includes the channel fd 228 * 229 * @param[out] fd 230 * The file descriptor (representing the intetrrupt) used in this channel. 231 * 232 * @return 233 * 0 on successfully setting the fd to non-blocking, non-zero otherwise. 234 */ 235 int 236 mlx5_os_set_nonblock_channel_fd(int fd) 237 { 238 (void)fd; 239 DRV_LOG(WARNING, "%s: is not supported", __func__); 240 return -ENOTSUP; 241 } 242 243 /** 244 * Function API open device under Windows 245 * 246 * This function calls the Windows glue APIs to open a device. 247 * 248 * @param[in] spawn 249 * Pointer to the device attributes (name, port, etc). 250 * @param[out] config 251 * Pointer to device configuration structure. 252 * @param[out] sh 253 * Pointer to shared context structure. 254 * 255 * @return 256 * 0 on success, a positive error value otherwise. 257 */ 258 int 259 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, 260 const struct mlx5_dev_config *config, 261 struct mlx5_dev_ctx_shared *sh) 262 { 263 RTE_SET_USED(config); 264 int err = 0; 265 struct mlx5_context *mlx5_ctx; 266 267 pthread_mutex_init(&sh->txpp.mutex, NULL); 268 /* Set numa node from pci probe */ 269 sh->numa_node = spawn->pci_dev->device.numa_node; 270 271 /* Try to open device with DevX */ 272 rte_errno = 0; 273 sh->ctx = mlx5_glue->open_device(spawn->phys_dev); 274 if (!sh->ctx) { 275 DRV_LOG(ERR, "open_device failed"); 276 err = errno; 277 return err; 278 } 279 sh->devx = 1; 280 mlx5_ctx = (struct mlx5_context *)sh->ctx; 281 err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev); 282 if (err) 283 DRV_LOG(ERR, "Failed to query device context fields."); 284 return err; 285 } 286 287 /** 288 * DV flow counter mode detect and config. 289 * 290 * @param dev 291 * Pointer to rte_eth_dev structure. 292 * 293 */ 294 static void 295 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused) 296 { 297 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 298 struct mlx5_priv *priv = dev->data->dev_private; 299 struct mlx5_dev_ctx_shared *sh = priv->sh; 300 bool fallback; 301 302 #ifndef HAVE_IBV_DEVX_ASYNC 303 fallback = true; 304 #else 305 fallback = false; 306 if (!priv->config.devx || !priv->config.dv_flow_en || 307 !priv->config.hca_attr.flow_counters_dump || 308 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) || 309 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP)) 310 fallback = true; 311 #endif 312 if (fallback) 313 DRV_LOG(INFO, "Use fall-back DV counter management. Flow " 314 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.", 315 priv->config.hca_attr.flow_counters_dump, 316 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap); 317 /* Initialize fallback mode only on the port initializes sh. */ 318 if (sh->refcnt == 1) 319 sh->cmng.counter_fallback = fallback; 320 else if (fallback != sh->cmng.counter_fallback) 321 DRV_LOG(WARNING, "Port %d in sh has different fallback mode " 322 "with others:%d.", PORT_ID(priv), fallback); 323 #endif 324 } 325 326 /** 327 * Spawn an Ethernet device from Verbs information. 328 * 329 * @param dpdk_dev 330 * Backing DPDK device. 331 * @param spawn 332 * Verbs device parameters (name, port, switch_info) to spawn. 333 * @param config 334 * Device configuration parameters. 335 * 336 * @return 337 * A valid Ethernet device object on success, NULL otherwise and rte_errno 338 * is set. The following errors are defined: 339 * 340 * EEXIST: device is already spawned 341 */ 342 static struct rte_eth_dev * 343 mlx5_dev_spawn(struct rte_device *dpdk_dev, 344 struct mlx5_dev_spawn_data *spawn, 345 struct mlx5_dev_config *config) 346 { 347 const struct mlx5_switch_info *switch_info = &spawn->info; 348 struct mlx5_dev_ctx_shared *sh = NULL; 349 struct mlx5_dev_attr device_attr; 350 struct rte_eth_dev *eth_dev = NULL; 351 struct mlx5_priv *priv = NULL; 352 int err = 0; 353 unsigned int cqe_comp; 354 struct rte_ether_addr mac; 355 char name[RTE_ETH_NAME_MAX_LEN]; 356 int own_domain_id = 0; 357 uint16_t port_id; 358 int i; 359 360 /* Build device name. */ 361 strlcpy(name, dpdk_dev->name, sizeof(name)); 362 /* check if the device is already spawned */ 363 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 364 rte_errno = EEXIST; 365 return NULL; 366 } 367 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 368 /* 369 * Some parameters are needed in advance to create device context. We 370 * process the devargs here to get ones, and later process devargs 371 * again to override some hardware settings. 372 */ 373 err = mlx5_args(config, dpdk_dev->devargs); 374 if (err) { 375 err = rte_errno; 376 DRV_LOG(ERR, "failed to process device arguments: %s", 377 strerror(rte_errno)); 378 goto error; 379 } 380 mlx5_malloc_mem_select(config->sys_mem_en); 381 sh = mlx5_alloc_shared_dev_ctx(spawn, config); 382 if (!sh) 383 return NULL; 384 config->devx = sh->devx; 385 /* Initialize the shutdown event in mlx5_dev_spawn to 386 * support mlx5_is_removed for Windows. 387 */ 388 err = mlx5_glue->devx_init_showdown_event(sh->ctx); 389 if (err) { 390 DRV_LOG(ERR, "failed to init showdown event: %s", 391 strerror(errno)); 392 goto error; 393 } 394 DRV_LOG(DEBUG, "MPW isn't supported"); 395 mlx5_os_get_dev_attr(sh->ctx, &device_attr); 396 config->swp = 0; 397 config->ind_table_max_size = 398 sh->device_attr.max_rwq_indirection_table_size; 399 cqe_comp = 0; 400 config->cqe_comp = cqe_comp; 401 DRV_LOG(DEBUG, "tunnel offloading is not supported"); 402 config->tunnel_en = 0; 403 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported"); 404 config->mpls_en = 0; 405 /* Allocate private eth device data. */ 406 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, 407 sizeof(*priv), 408 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 409 if (priv == NULL) { 410 DRV_LOG(ERR, "priv allocation failure"); 411 err = ENOMEM; 412 goto error; 413 } 414 priv->sh = sh; 415 priv->dev_port = spawn->phys_port; 416 priv->pci_dev = spawn->pci_dev; 417 priv->mtu = RTE_ETHER_MTU; 418 priv->mp_id.port_id = port_id; 419 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 420 priv->representor = !!switch_info->representor; 421 priv->master = !!switch_info->master; 422 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 423 priv->vport_meta_tag = 0; 424 priv->vport_meta_mask = 0; 425 priv->pf_bond = spawn->pf_bond; 426 priv->vport_id = -1; 427 /* representor_id field keeps the unmodified VF index. */ 428 priv->representor_id = -1; 429 /* 430 * Look for sibling devices in order to reuse their switch domain 431 * if any, otherwise allocate one. 432 */ 433 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 434 const struct mlx5_priv *opriv = 435 rte_eth_devices[port_id].data->dev_private; 436 437 if (!opriv || 438 opriv->sh != priv->sh || 439 opriv->domain_id == 440 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 441 continue; 442 priv->domain_id = opriv->domain_id; 443 break; 444 } 445 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 446 err = rte_eth_switch_domain_alloc(&priv->domain_id); 447 if (err) { 448 err = rte_errno; 449 DRV_LOG(ERR, "unable to allocate switch domain: %s", 450 strerror(rte_errno)); 451 goto error; 452 } 453 own_domain_id = 1; 454 } 455 /* Override some values set by hardware configuration. */ 456 mlx5_args(config, dpdk_dev->devargs); 457 err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev); 458 if (err) 459 goto error; 460 DRV_LOG(DEBUG, "counters are not supported"); 461 config->ind_table_max_size = 462 sh->device_attr.max_rwq_indirection_table_size; 463 /* 464 * Remove this check once DPDK supports larger/variable 465 * indirection tables. 466 */ 467 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 468 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512; 469 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 470 config->ind_table_max_size); 471 DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 472 (config->hw_vlan_strip ? "" : "not ")); 473 if (config->hw_padding) { 474 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 475 config->hw_padding = 0; 476 } 477 if (config->tso) 478 config->tso_max_payload_sz = sh->device_attr.max_tso; 479 DRV_LOG(DEBUG, "%sMPS is %s.", 480 config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 481 config->mps == MLX5_MPW ? "legacy " : "", 482 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 483 if (config->cqe_comp && !cqe_comp) { 484 DRV_LOG(WARNING, "Rx CQE compression isn't supported."); 485 config->cqe_comp = 0; 486 } 487 if (config->devx) { 488 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr); 489 if (err) { 490 err = -err; 491 goto error; 492 } 493 /* Check relax ordering support. */ 494 sh->cmng.relaxed_ordering_read = 0; 495 sh->cmng.relaxed_ordering_write = 0; 496 if (!haswell_broadwell_cpu) { 497 sh->cmng.relaxed_ordering_write = 498 config->hca_attr.relaxed_ordering_write; 499 sh->cmng.relaxed_ordering_read = 500 config->hca_attr.relaxed_ordering_read; 501 } 502 config->hw_csum = config->hca_attr.csum_cap; 503 DRV_LOG(DEBUG, "checksum offloading is %ssupported", 504 (config->hw_csum ? "" : "not ")); 505 } 506 if (config->devx) { 507 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 508 509 err = config->hca_attr.access_register_user ? 510 mlx5_devx_cmd_register_read 511 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0, 512 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 513 if (!err) { 514 uint32_t ts_mode; 515 516 /* MTUTC register is read successfully. */ 517 ts_mode = MLX5_GET(register_mtutc, reg, 518 time_stamp_mode); 519 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 520 config->rt_timestamp = 1; 521 } else { 522 /* Kernel does not support register reading. */ 523 if (config->hca_attr.dev_freq_khz == 524 (NS_PER_S / MS_PER_S)) 525 config->rt_timestamp = 1; 526 } 527 sh->rq_ts_format = config->hca_attr.rq_ts_format; 528 sh->sq_ts_format = config->hca_attr.sq_ts_format; 529 sh->qp_ts_format = config->hca_attr.qp_ts_format; 530 } 531 if (config->mprq.enabled) { 532 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 533 config->mprq.enabled = 0; 534 } 535 if (config->max_dump_files_num == 0) 536 config->max_dump_files_num = 128; 537 eth_dev = rte_eth_dev_allocate(name); 538 if (eth_dev == NULL) { 539 DRV_LOG(ERR, "can not allocate rte ethdev"); 540 err = ENOMEM; 541 goto error; 542 } 543 if (priv->representor) { 544 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 545 eth_dev->data->representor_id = priv->representor_id; 546 } 547 /* 548 * Store associated network device interface index. This index 549 * is permanent throughout the lifetime of device. So, we may store 550 * the ifindex here and use the cached value further. 551 */ 552 MLX5_ASSERT(spawn->ifindex); 553 priv->if_index = spawn->ifindex; 554 eth_dev->data->dev_private = priv; 555 priv->dev_data = eth_dev->data; 556 eth_dev->data->mac_addrs = priv->mac; 557 eth_dev->device = dpdk_dev; 558 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 559 /* Configure the first MAC address by default. */ 560 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 561 DRV_LOG(ERR, 562 "port %u cannot get MAC address, is mlx5_en" 563 " loaded? (errno: %s).", 564 eth_dev->data->port_id, strerror(rte_errno)); 565 err = ENODEV; 566 goto error; 567 } 568 DRV_LOG(INFO, 569 "port %u MAC address is " RTE_ETHER_ADDR_PRT_FMT, 570 eth_dev->data->port_id, RTE_ETHER_ADDR_BYTES(&mac)); 571 #ifdef RTE_LIBRTE_MLX5_DEBUG 572 { 573 char ifname[MLX5_NAMESIZE]; 574 575 if (mlx5_get_ifname(eth_dev, &ifname) == 0) 576 DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 577 eth_dev->data->port_id, ifname); 578 else 579 DRV_LOG(DEBUG, "port %u ifname is unknown.", 580 eth_dev->data->port_id); 581 } 582 #endif 583 /* Get actual MTU if possible. */ 584 err = mlx5_get_mtu(eth_dev, &priv->mtu); 585 if (err) { 586 err = rte_errno; 587 goto error; 588 } 589 DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id, 590 priv->mtu); 591 /* Initialize burst functions to prevent crashes before link-up. */ 592 eth_dev->rx_pkt_burst = removed_rx_burst; 593 eth_dev->tx_pkt_burst = removed_tx_burst; 594 eth_dev->dev_ops = &mlx5_dev_ops; 595 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 596 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 597 eth_dev->rx_queue_count = mlx5_rx_queue_count; 598 /* Register MAC address. */ 599 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 600 priv->ctrl_flows = 0; 601 TAILQ_INIT(&priv->flow_meters); 602 priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); 603 if (!priv->mtr_profile_tbl) 604 goto error; 605 /* Bring Ethernet device up. */ 606 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", 607 eth_dev->data->port_id); 608 /* nl calls are unsupported - set to -1 not to fail on release */ 609 priv->nl_socket_rdma = -1; 610 priv->nl_socket_route = -1; 611 mlx5_set_link_up(eth_dev); 612 /* 613 * Even though the interrupt handler is not installed yet, 614 * interrupts will still trigger on the async_fd from 615 * Verbs context returned by ibv_open_device(). 616 */ 617 mlx5_link_update(eth_dev, 0); 618 config->dv_esw_en = 0; 619 /* Detect minimal data bytes to inline. */ 620 mlx5_set_min_inline(spawn, config); 621 /* Store device configuration on private structure. */ 622 priv->config = *config; 623 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { 624 icfg[i].release_mem_en = !!config->reclaim_mode; 625 if (config->reclaim_mode) 626 icfg[i].per_core_cache = 0; 627 priv->flows[i] = mlx5_ipool_create(&icfg[i]); 628 if (!priv->flows[i]) 629 goto error; 630 } 631 /* Create context for virtual machine VLAN workaround. */ 632 priv->vmwa_context = NULL; 633 if (config->dv_flow_en) { 634 err = mlx5_alloc_shared_dr(priv); 635 if (err) 636 goto error; 637 } 638 /* No supported flow priority number detection. */ 639 priv->config.flow_prio = -1; 640 if (!priv->config.dv_esw_en && 641 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 642 DRV_LOG(WARNING, "metadata mode %u is not supported " 643 "(no E-Switch)", priv->config.dv_xmeta_en); 644 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 645 } 646 mlx5_set_metadata_mask(eth_dev); 647 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 648 !priv->sh->dv_regc0_mask) { 649 DRV_LOG(ERR, "metadata mode %u is not supported " 650 "(no metadata reg_c[0] is available).", 651 priv->config.dv_xmeta_en); 652 err = ENOTSUP; 653 goto error; 654 } 655 priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true, 656 mlx5_hrxq_create_cb, mlx5_hrxq_match_cb, 657 mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb, 658 mlx5_hrxq_clone_free_cb); 659 /* Query availability of metadata reg_c's. */ 660 err = mlx5_flow_discover_mreg_c(eth_dev); 661 if (err < 0) { 662 err = -err; 663 goto error; 664 } 665 if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 666 DRV_LOG(DEBUG, 667 "port %u extensive metadata register is not supported.", 668 eth_dev->data->port_id); 669 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 670 DRV_LOG(ERR, "metadata mode %u is not supported " 671 "(no metadata registers available).", 672 priv->config.dv_xmeta_en); 673 err = ENOTSUP; 674 goto error; 675 } 676 } 677 if (config->devx && config->dv_flow_en) { 678 priv->obj_ops = devx_obj_ops; 679 } else { 680 DRV_LOG(ERR, "Flow mode %u is not supported " 681 "(Windows flow must be DevX with DV flow enabled).", 682 priv->config.dv_flow_en); 683 err = ENOTSUP; 684 goto error; 685 } 686 mlx5_flow_counter_mode_config(eth_dev); 687 return eth_dev; 688 error: 689 if (priv) { 690 if (priv->mtr_profile_tbl) 691 mlx5_l3t_destroy(priv->mtr_profile_tbl); 692 if (own_domain_id) 693 claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 694 mlx5_free(priv); 695 if (eth_dev != NULL) 696 eth_dev->data->dev_private = NULL; 697 } 698 if (eth_dev != NULL) { 699 /* mac_addrs must not be freed alone because part of 700 * dev_private 701 **/ 702 eth_dev->data->mac_addrs = NULL; 703 rte_eth_dev_release_port(eth_dev); 704 } 705 if (sh) 706 mlx5_free_shared_dev_ctx(sh); 707 MLX5_ASSERT(err > 0); 708 rte_errno = err; 709 return NULL; 710 } 711 712 /** 713 * This function should share events between multiple ports of single IB 714 * device. Currently it has no support under Windows. 715 * 716 * @param sh 717 * Pointer to mlx5_dev_ctx_shared object. 718 */ 719 void 720 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 721 { 722 (void)sh; 723 DRV_LOG(WARNING, "%s: is not supported", __func__); 724 } 725 726 /** 727 * This function should share events between multiple ports of single IB 728 * device. Currently it has no support under Windows. 729 * 730 * @param dev 731 * Pointer to mlx5_dev_ctx_shared object. 732 */ 733 void 734 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) 735 { 736 (void)sh; 737 DRV_LOG(WARNING, "%s: is not supported", __func__); 738 } 739 740 /** 741 * Read statistics by a named counter. 742 * 743 * @param[in] priv 744 * Pointer to the private device data structure. 745 * @param[in] ctr_name 746 * Pointer to the name of the statistic counter to read 747 * @param[out] stat 748 * Pointer to read statistic value. 749 * @return 750 * 0 on success and stat is valud, 1 if failed to read the value 751 * rte_errno is set. 752 * 753 */ 754 int 755 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, 756 uint64_t *stat) 757 { 758 RTE_SET_USED(priv); 759 RTE_SET_USED(ctr_name); 760 RTE_SET_USED(stat); 761 DRV_LOG(WARNING, "%s: is not supported", __func__); 762 return -ENOTSUP; 763 } 764 765 /** 766 * Flush device MAC addresses 767 * Currently it has no support under Windows. 768 * 769 * @param dev 770 * Pointer to Ethernet device structure. 771 * 772 */ 773 void 774 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev) 775 { 776 (void)dev; 777 DRV_LOG(WARNING, "%s: is not supported", __func__); 778 } 779 780 /** 781 * Remove a MAC address from device 782 * Currently it has no support under Windows. 783 * 784 * @param dev 785 * Pointer to Ethernet device structure. 786 * @param index 787 * MAC address index. 788 */ 789 void 790 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 791 { 792 (void)dev; 793 (void)(index); 794 DRV_LOG(WARNING, "%s: is not supported", __func__); 795 } 796 797 /** 798 * Adds a MAC address to the device 799 * Currently it has no support under Windows. 800 * 801 * @param dev 802 * Pointer to Ethernet device structure. 803 * @param mac_addr 804 * MAC address to register. 805 * @param index 806 * MAC address index. 807 * 808 * @return 809 * 0 on success, a negative errno value otherwise 810 */ 811 int 812 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, 813 uint32_t index) 814 { 815 (void)index; 816 struct rte_ether_addr lmac; 817 818 if (mlx5_get_mac(dev, &lmac.addr_bytes)) { 819 DRV_LOG(ERR, 820 "port %u cannot get MAC address, is mlx5_en" 821 " loaded? (errno: %s)", 822 dev->data->port_id, strerror(rte_errno)); 823 return rte_errno; 824 } 825 if (!rte_is_same_ether_addr(&lmac, mac)) { 826 DRV_LOG(ERR, 827 "adding new mac address to device is unsupported"); 828 return -ENOTSUP; 829 } 830 return 0; 831 } 832 833 /** 834 * Modify a VF MAC address 835 * Currently it has no support under Windows. 836 * 837 * @param priv 838 * Pointer to device private data. 839 * @param mac_addr 840 * MAC address to modify into. 841 * @param iface_idx 842 * Net device interface index 843 * @param vf_index 844 * VF index 845 * 846 * @return 847 * 0 on success, a negative errno value otherwise 848 */ 849 int 850 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, 851 unsigned int iface_idx, 852 struct rte_ether_addr *mac_addr, 853 int vf_index) 854 { 855 (void)priv; 856 (void)iface_idx; 857 (void)mac_addr; 858 (void)vf_index; 859 DRV_LOG(WARNING, "%s: is not supported", __func__); 860 return -ENOTSUP; 861 } 862 863 /** 864 * Set device promiscuous mode 865 * Currently it has no support under Windows. 866 * 867 * @param dev 868 * Pointer to Ethernet device structure. 869 * @param enable 870 * 0 - promiscuous is disabled, otherwise - enabled 871 * 872 * @return 873 * 0 on success, a negative error value otherwise 874 */ 875 int 876 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable) 877 { 878 (void)dev; 879 (void)enable; 880 DRV_LOG(WARNING, "%s: is not supported", __func__); 881 return -ENOTSUP; 882 } 883 884 /** 885 * Set device allmulti mode 886 * 887 * @param dev 888 * Pointer to Ethernet device structure. 889 * @param enable 890 * 0 - all multicase is disabled, otherwise - enabled 891 * 892 * @return 893 * 0 on success, a negative error value otherwise 894 */ 895 int 896 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable) 897 { 898 (void)dev; 899 (void)enable; 900 DRV_LOG(WARNING, "%s: is not supported", __func__); 901 return -ENOTSUP; 902 } 903 904 /** 905 * Detect if a devx_device_bdf object has identical DBDF values to the 906 * rte_pci_addr found in bus/pci probing 907 * 908 * @param[in] devx_bdf 909 * Pointer to the devx_device_bdf structure. 910 * @param[in] addr 911 * Pointer to the rte_pci_addr structure. 912 * 913 * @return 914 * 1 on Device match, 0 on mismatch. 915 */ 916 static int 917 mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf, 918 struct rte_pci_addr *addr) 919 { 920 if (addr->domain != (devx_bdf->bus_id >> 8) || 921 addr->bus != (devx_bdf->bus_id & 0xff) || 922 addr->devid != devx_bdf->dev_id || 923 addr->function != devx_bdf->fnc_id) { 924 return 0; 925 } 926 return 1; 927 } 928 929 /** 930 * Detect if a devx_device_bdf object matches the rte_pci_addr 931 * found in bus/pci probing 932 * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF. 933 * 934 * @param[in] devx_bdf 935 * Pointer to the devx_device_bdf structure. 936 * @param[in] addr 937 * Pointer to the rte_pci_addr structure. 938 * 939 * @return 940 * 1 on Device match, 0 on mismatch, rte_errno code on failure. 941 */ 942 static int 943 mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf, 944 struct rte_pci_addr *addr) 945 { 946 int err; 947 struct devx_device mlx5_dev; 948 949 if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr)) 950 return 1; 951 /** 952 * Didn't match on Native/PF BDF, could still 953 * Match a VF BDF, check it next 954 */ 955 err = mlx5_glue->query_device(devx_bdf, &mlx5_dev); 956 if (err) { 957 DRV_LOG(ERR, "query_device failed"); 958 rte_errno = err; 959 return rte_errno; 960 } 961 if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr)) 962 return 1; 963 return 0; 964 } 965 966 /** 967 * DPDK callback to register a PCI device. 968 * 969 * This function spawns Ethernet devices out of a given device. 970 * 971 * @param[in] dev 972 * Pointer to the generic device. 973 * 974 * @return 975 * 0 on success, a negative errno value otherwise and rte_errno is set. 976 */ 977 int 978 mlx5_os_net_probe(struct rte_device *dev) 979 { 980 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 981 struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs; 982 /* 983 * Number of found IB Devices matching with requested PCI BDF. 984 * nd != 1 means there are multiple IB devices over the same 985 * PCI device and we have representors and master. 986 */ 987 unsigned int nd = 0; 988 /* 989 * Number of found IB device Ports. nd = 1 and np = 1..n means 990 * we have the single multiport IB device, and there may be 991 * representors attached to some of found ports. 992 * Currently not supported. 993 * unsigned int np = 0; 994 */ 995 996 /* 997 * Number of DPDK ethernet devices to Spawn - either over 998 * multiple IB devices or multiple ports of single IB device. 999 * Actually this is the number of iterations to spawn. 1000 */ 1001 unsigned int ns = 0; 1002 /* 1003 * Bonding device 1004 * < 0 - no bonding device (single one) 1005 * >= 0 - bonding device (value is slave PF index) 1006 */ 1007 int bd = -1; 1008 struct mlx5_dev_spawn_data *list = NULL; 1009 struct mlx5_dev_config dev_config; 1010 unsigned int dev_config_vf; 1011 int ret, err; 1012 uint32_t restore; 1013 1014 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 1015 DRV_LOG(ERR, "Secondary process is not supported on Windows."); 1016 return -ENOTSUP; 1017 } 1018 ret = mlx5_init_once(); 1019 if (ret) { 1020 DRV_LOG(ERR, "unable to init PMD global data: %s", 1021 strerror(rte_errno)); 1022 return -rte_errno; 1023 } 1024 errno = 0; 1025 devx_bdf_devs = mlx5_glue->get_device_list(&ret); 1026 orig_devx_bdf_devs = devx_bdf_devs; 1027 if (!devx_bdf_devs) { 1028 rte_errno = errno ? errno : ENOSYS; 1029 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 1030 return -rte_errno; 1031 } 1032 /* 1033 * First scan the list of all Infiniband devices to find 1034 * matching ones, gathering into the list. 1035 */ 1036 struct devx_device_bdf *devx_bdf_match[ret + 1]; 1037 1038 while (ret-- > 0) { 1039 err = mlx5_match_devx_devices_to_addr(devx_bdf_devs, 1040 &pci_dev->addr); 1041 if (!err) { 1042 devx_bdf_devs++; 1043 continue; 1044 } 1045 if (err != 1) { 1046 ret = -err; 1047 goto exit; 1048 } 1049 devx_bdf_match[nd++] = devx_bdf_devs; 1050 } 1051 devx_bdf_match[nd] = NULL; 1052 if (!nd) { 1053 /* No device matches, just complain and bail out. */ 1054 DRV_LOG(WARNING, 1055 "no DevX device matches PCI device " PCI_PRI_FMT "," 1056 " is DevX Configured?", 1057 pci_dev->addr.domain, pci_dev->addr.bus, 1058 pci_dev->addr.devid, pci_dev->addr.function); 1059 rte_errno = ENOENT; 1060 ret = -rte_errno; 1061 goto exit; 1062 } 1063 /* 1064 * Now we can determine the maximal 1065 * amount of devices to be spawned. 1066 */ 1067 list = mlx5_malloc(MLX5_MEM_ZERO, 1068 sizeof(struct mlx5_dev_spawn_data), 1069 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 1070 if (!list) { 1071 DRV_LOG(ERR, "spawn data array allocation failure"); 1072 rte_errno = ENOMEM; 1073 ret = -rte_errno; 1074 goto exit; 1075 } 1076 memset(&list[ns].info, 0, sizeof(list[ns].info)); 1077 list[ns].max_port = 1; 1078 list[ns].phys_port = 1; 1079 list[ns].phys_dev = devx_bdf_match[ns]; 1080 list[ns].eth_dev = NULL; 1081 list[ns].pci_dev = pci_dev; 1082 list[ns].pf_bond = bd; 1083 list[ns].ifindex = -1; /* Spawn will assign */ 1084 list[ns].info = 1085 (struct mlx5_switch_info){ 1086 .master = 0, 1087 .representor = 0, 1088 .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK, 1089 .port_name = 0, 1090 .switch_id = 0, 1091 }; 1092 /* Device specific configuration. */ 1093 switch (pci_dev->id.device_id) { 1094 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 1095 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1096 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 1097 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 1098 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 1099 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 1100 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 1101 dev_config_vf = 1; 1102 break; 1103 default: 1104 dev_config_vf = 0; 1105 break; 1106 } 1107 /* Default configuration. */ 1108 memset(&dev_config, 0, sizeof(struct mlx5_dev_config)); 1109 dev_config.vf = dev_config_vf; 1110 dev_config.mps = 0; 1111 dev_config.dbnc = MLX5_ARG_UNSET; 1112 dev_config.rx_vec_en = 1; 1113 dev_config.txq_inline_max = MLX5_ARG_UNSET; 1114 dev_config.txq_inline_min = MLX5_ARG_UNSET; 1115 dev_config.txq_inline_mpw = MLX5_ARG_UNSET; 1116 dev_config.txqs_inline = MLX5_ARG_UNSET; 1117 dev_config.vf_nl_en = 0; 1118 dev_config.mr_ext_memseg_en = 1; 1119 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 1120 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 1121 dev_config.dv_esw_en = 0; 1122 dev_config.dv_flow_en = 1; 1123 dev_config.decap_en = 0; 1124 dev_config.log_hp_size = MLX5_ARG_UNSET; 1125 list[ns].numa_node = pci_dev->device.numa_node; 1126 list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device, 1127 &list[ns], 1128 &dev_config); 1129 if (!list[ns].eth_dev) 1130 goto exit; 1131 restore = list[ns].eth_dev->data->dev_flags; 1132 rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev); 1133 /* Restore non-PCI flags cleared by the above call. */ 1134 list[ns].eth_dev->data->dev_flags |= restore; 1135 rte_eth_dev_probing_finish(list[ns].eth_dev); 1136 ret = 0; 1137 exit: 1138 /* 1139 * Do the routine cleanup: 1140 * - free allocated spawn data array 1141 * - free the device list 1142 */ 1143 if (list) 1144 mlx5_free(list); 1145 MLX5_ASSERT(orig_devx_bdf_devs); 1146 mlx5_glue->free_device_list(orig_devx_bdf_devs); 1147 return ret; 1148 } 1149 1150 /** 1151 * Set the reg_mr and dereg_mr call backs 1152 * 1153 * @param reg_mr_cb[out] 1154 * Pointer to reg_mr func 1155 * @param dereg_mr_cb[out] 1156 * Pointer to dereg_mr func 1157 * 1158 */ 1159 void 1160 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, 1161 mlx5_dereg_mr_t *dereg_mr_cb) 1162 { 1163 *reg_mr_cb = mlx5_os_reg_mr; 1164 *dereg_mr_cb = mlx5_os_dereg_mr; 1165 } 1166 1167 /** 1168 * Extract pdn of PD object using DevX 1169 * 1170 * @param[in] pd 1171 * Pointer to the DevX PD object. 1172 * @param[out] pdn 1173 * Pointer to the PD object number variable. 1174 * 1175 * @return 1176 * 0 on success, error value otherwise. 1177 */ 1178 int 1179 mlx5_os_get_pdn(void *pd, uint32_t *pdn) 1180 { 1181 if (!pd) 1182 return -EINVAL; 1183 1184 *pdn = ((struct mlx5_pd *)pd)->pdn; 1185 return 0; 1186 } 1187 1188 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0}; 1189