1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 */ 4 5 #include <errno.h> 6 #include <stdalign.h> 7 #include <stddef.h> 8 #include <stdint.h> 9 #include <stdlib.h> 10 11 #include <rte_windows.h> 12 #include <ethdev_pci.h> 13 14 #include <mlx5_glue.h> 15 #include <mlx5_devx_cmds.h> 16 #include <mlx5_common.h> 17 #include <mlx5_common_mp.h> 18 #include <mlx5_common_mr.h> 19 #include <mlx5_malloc.h> 20 21 #include "mlx5_defs.h" 22 #include "mlx5.h" 23 #include "mlx5_common_os.h" 24 #include "mlx5_utils.h" 25 #include "mlx5_rxtx.h" 26 #include "mlx5_rx.h" 27 #include "mlx5_tx.h" 28 #include "mlx5_autoconf.h" 29 #include "mlx5_mr.h" 30 #include "mlx5_flow.h" 31 #include "mlx5_devx.h" 32 33 static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data"; 34 35 /* Spinlock for mlx5_shared_data allocation. */ 36 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER; 37 38 /* rte flow indexed pool configuration. */ 39 static struct mlx5_indexed_pool_config icfg[] = { 40 { 41 .size = sizeof(struct rte_flow), 42 .trunk_size = 64, 43 .need_lock = 1, 44 .release_mem_en = 0, 45 .malloc = mlx5_malloc, 46 .free = mlx5_free, 47 .per_core_cache = 0, 48 .type = "ctl_flow_ipool", 49 }, 50 { 51 .size = sizeof(struct rte_flow), 52 .trunk_size = 64, 53 .grow_trunk = 3, 54 .grow_shift = 2, 55 .need_lock = 1, 56 .release_mem_en = 0, 57 .malloc = mlx5_malloc, 58 .free = mlx5_free, 59 .per_core_cache = 1 << 14, 60 .type = "rte_flow_ipool", 61 }, 62 { 63 .size = sizeof(struct rte_flow), 64 .trunk_size = 64, 65 .grow_trunk = 3, 66 .grow_shift = 2, 67 .need_lock = 1, 68 .release_mem_en = 0, 69 .malloc = mlx5_malloc, 70 .free = mlx5_free, 71 .per_core_cache = 0, 72 .type = "mcp_flow_ipool", 73 }, 74 }; 75 76 /** 77 * Initialize shared data between primary and secondary process. 78 * 79 * A memzone is reserved by primary process and secondary processes attach to 80 * the memzone. 81 * 82 * @return 83 * 0 on success, a negative errno value otherwise and rte_errno is set. 84 */ 85 static int 86 mlx5_init_shared_data(void) 87 { 88 const struct rte_memzone *mz; 89 int ret = 0; 90 91 rte_spinlock_lock(&mlx5_shared_data_lock); 92 if (mlx5_shared_data == NULL) { 93 /* Allocate shared memory. */ 94 mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA, 95 sizeof(*mlx5_shared_data), 96 SOCKET_ID_ANY, 0); 97 if (mz == NULL) { 98 DRV_LOG(ERR, 99 "Cannot allocate mlx5 shared data"); 100 ret = -rte_errno; 101 goto error; 102 } 103 mlx5_shared_data = mz->addr; 104 memset(mlx5_shared_data, 0, sizeof(*mlx5_shared_data)); 105 rte_spinlock_init(&mlx5_shared_data->lock); 106 } 107 error: 108 rte_spinlock_unlock(&mlx5_shared_data_lock); 109 return ret; 110 } 111 112 /** 113 * PMD global initialization. 114 * 115 * Independent from individual device, this function initializes global 116 * per-PMD data structures distinguishing primary and secondary processes. 117 * Hence, each initialization is called once per a process. 118 * 119 * @return 120 * 0 on success, a negative errno value otherwise and rte_errno is set. 121 */ 122 static int 123 mlx5_init_once(void) 124 { 125 if (mlx5_init_shared_data()) 126 return -rte_errno; 127 return 0; 128 } 129 130 /** 131 * Get mlx5 device attributes. 132 * 133 * @param ctx 134 * Pointer to device context. 135 * 136 * @param device_attr 137 * Pointer to mlx5 device attributes. 138 * 139 * @return 140 * 0 on success, non zero error number otherwise 141 */ 142 int 143 mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr) 144 { 145 struct mlx5_context *mlx5_ctx; 146 struct mlx5_hca_attr hca_attr; 147 void *pv_iseg = NULL; 148 u32 cb_iseg = 0; 149 int err = 0; 150 151 if (!ctx) 152 return -EINVAL; 153 mlx5_ctx = (struct mlx5_context *)ctx; 154 memset(device_attr, 0, sizeof(*device_attr)); 155 err = mlx5_devx_cmd_query_hca_attr(mlx5_ctx, &hca_attr); 156 if (err) { 157 DRV_LOG(ERR, "Failed to get device hca_cap"); 158 return err; 159 } 160 device_attr->max_cq = 1 << hca_attr.log_max_cq; 161 device_attr->max_qp = 1 << hca_attr.log_max_qp; 162 device_attr->max_qp_wr = 1 << hca_attr.log_max_qp_sz; 163 device_attr->max_cqe = 1 << hca_attr.log_max_cq_sz; 164 device_attr->max_mr = 1 << hca_attr.log_max_mrw_sz; 165 device_attr->max_pd = 1 << hca_attr.log_max_pd; 166 device_attr->max_srq = 1 << hca_attr.log_max_srq; 167 device_attr->max_srq_wr = 1 << hca_attr.log_max_srq_sz; 168 if (hca_attr.rss_ind_tbl_cap) { 169 device_attr->max_rwq_indirection_table_size = 170 1 << hca_attr.rss_ind_tbl_cap; 171 } 172 pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg); 173 if (pv_iseg == NULL) { 174 DRV_LOG(ERR, "Failed to get device hca_iseg"); 175 return errno; 176 } 177 if (!err) { 178 snprintf(device_attr->fw_ver, 64, "%x.%x.%04x", 179 MLX5_GET(initial_seg, pv_iseg, fw_rev_major), 180 MLX5_GET(initial_seg, pv_iseg, fw_rev_minor), 181 MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor)); 182 } 183 return err; 184 } 185 186 /** 187 * Initialize DR related data within private structure. 188 * Routine checks the reference counter and does actual 189 * resources creation/initialization only if counter is zero. 190 * 191 * @param[in] priv 192 * Pointer to the private device data structure. 193 * 194 * @return 195 * Zero on success, positive error code otherwise. 196 */ 197 static int 198 mlx5_alloc_shared_dr(struct mlx5_priv *priv) 199 { 200 struct mlx5_dev_ctx_shared *sh = priv->sh; 201 int err = 0; 202 203 if (!sh->flow_tbls) 204 err = mlx5_alloc_table_hash_list(priv); 205 else 206 DRV_LOG(DEBUG, "sh->flow_tbls[%p] already created, reuse", 207 (void *)sh->flow_tbls); 208 return err; 209 } 210 /** 211 * Destroy DR related data within private structure. 212 * 213 * @param[in] priv 214 * Pointer to the private device data structure. 215 */ 216 void 217 mlx5_os_free_shared_dr(struct mlx5_priv *priv) 218 { 219 mlx5_free_table_hash_list(priv); 220 } 221 222 /** 223 * Set the completion channel file descriptor interrupt as non-blocking. 224 * Currently it has no support under Windows. 225 * 226 * @param[in] rxq_obj 227 * Pointer to RQ channel object, which includes the channel fd 228 * 229 * @param[out] fd 230 * The file descriptor (representing the intetrrupt) used in this channel. 231 * 232 * @return 233 * 0 on successfully setting the fd to non-blocking, non-zero otherwise. 234 */ 235 int 236 mlx5_os_set_nonblock_channel_fd(int fd) 237 { 238 (void)fd; 239 DRV_LOG(WARNING, "%s: is not supported", __func__); 240 return -ENOTSUP; 241 } 242 243 /** 244 * Function API open device under Windows 245 * 246 * This function calls the Windows glue APIs to open a device. 247 * 248 * @param[in] spawn 249 * Pointer to the device attributes (name, port, etc). 250 * @param[out] config 251 * Pointer to device configuration structure. 252 * @param[out] sh 253 * Pointer to shared context structure. 254 * 255 * @return 256 * 0 on success, a positive error value otherwise. 257 */ 258 int 259 mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn, 260 const struct mlx5_dev_config *config, 261 struct mlx5_dev_ctx_shared *sh) 262 { 263 RTE_SET_USED(config); 264 int err = 0; 265 struct mlx5_context *mlx5_ctx; 266 267 pthread_mutex_init(&sh->txpp.mutex, NULL); 268 /* Set numa node from pci probe */ 269 sh->numa_node = spawn->pci_dev->device.numa_node; 270 271 /* Try to open device with DevX */ 272 rte_errno = 0; 273 sh->ctx = mlx5_glue->open_device(spawn->phys_dev); 274 if (!sh->ctx) { 275 DRV_LOG(ERR, "open_device failed"); 276 err = errno; 277 return err; 278 } 279 sh->devx = 1; 280 mlx5_ctx = (struct mlx5_context *)sh->ctx; 281 err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev); 282 if (err) 283 DRV_LOG(ERR, "Failed to query device context fields."); 284 return err; 285 } 286 287 /** 288 * DV flow counter mode detect and config. 289 * 290 * @param dev 291 * Pointer to rte_eth_dev structure. 292 * 293 */ 294 static void 295 mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused) 296 { 297 #ifdef HAVE_IBV_FLOW_DV_SUPPORT 298 struct mlx5_priv *priv = dev->data->dev_private; 299 struct mlx5_dev_ctx_shared *sh = priv->sh; 300 bool fallback; 301 302 #ifndef HAVE_IBV_DEVX_ASYNC 303 fallback = true; 304 #else 305 fallback = false; 306 if (!priv->config.devx || !priv->config.dv_flow_en || 307 !priv->config.hca_attr.flow_counters_dump || 308 !(priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4) || 309 (mlx5_flow_dv_discover_counter_offset_support(dev) == -ENOTSUP)) 310 fallback = true; 311 #endif 312 if (fallback) 313 DRV_LOG(INFO, "Use fall-back DV counter management. Flow " 314 "counter dump:%d, bulk_alloc_bitmap:0x%hhx.", 315 priv->config.hca_attr.flow_counters_dump, 316 priv->config.hca_attr.flow_counter_bulk_alloc_bitmap); 317 /* Initialize fallback mode only on the port initializes sh. */ 318 if (sh->refcnt == 1) 319 sh->cmng.counter_fallback = fallback; 320 else if (fallback != sh->cmng.counter_fallback) 321 DRV_LOG(WARNING, "Port %d in sh has different fallback mode " 322 "with others:%d.", PORT_ID(priv), fallback); 323 #endif 324 } 325 326 /** 327 * Spawn an Ethernet device from Verbs information. 328 * 329 * @param dpdk_dev 330 * Backing DPDK device. 331 * @param spawn 332 * Verbs device parameters (name, port, switch_info) to spawn. 333 * @param config 334 * Device configuration parameters. 335 * 336 * @return 337 * A valid Ethernet device object on success, NULL otherwise and rte_errno 338 * is set. The following errors are defined: 339 * 340 * EEXIST: device is already spawned 341 */ 342 static struct rte_eth_dev * 343 mlx5_dev_spawn(struct rte_device *dpdk_dev, 344 struct mlx5_dev_spawn_data *spawn, 345 struct mlx5_dev_config *config) 346 { 347 const struct mlx5_switch_info *switch_info = &spawn->info; 348 struct mlx5_dev_ctx_shared *sh = NULL; 349 struct mlx5_dev_attr device_attr; 350 struct rte_eth_dev *eth_dev = NULL; 351 struct mlx5_priv *priv = NULL; 352 int err = 0; 353 unsigned int cqe_comp; 354 struct rte_ether_addr mac; 355 char name[RTE_ETH_NAME_MAX_LEN]; 356 int own_domain_id = 0; 357 uint16_t port_id; 358 int i; 359 360 /* Build device name. */ 361 strlcpy(name, dpdk_dev->name, sizeof(name)); 362 /* check if the device is already spawned */ 363 if (rte_eth_dev_get_port_by_name(name, &port_id) == 0) { 364 rte_errno = EEXIST; 365 return NULL; 366 } 367 DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name); 368 /* 369 * Some parameters are needed in advance to create device context. We 370 * process the devargs here to get ones, and later process devargs 371 * again to override some hardware settings. 372 */ 373 err = mlx5_args(config, dpdk_dev->devargs); 374 if (err) { 375 err = rte_errno; 376 DRV_LOG(ERR, "failed to process device arguments: %s", 377 strerror(rte_errno)); 378 goto error; 379 } 380 mlx5_malloc_mem_select(config->sys_mem_en); 381 sh = mlx5_alloc_shared_dev_ctx(spawn, config); 382 if (!sh) 383 return NULL; 384 config->devx = sh->devx; 385 /* Initialize the shutdown event in mlx5_dev_spawn to 386 * support mlx5_is_removed for Windows. 387 */ 388 err = mlx5_glue->devx_init_showdown_event(sh->ctx); 389 if (err) { 390 DRV_LOG(ERR, "failed to init showdown event: %s", 391 strerror(errno)); 392 goto error; 393 } 394 DRV_LOG(DEBUG, "MPW isn't supported"); 395 mlx5_os_get_dev_attr(sh->ctx, &device_attr); 396 config->swp = 0; 397 config->ind_table_max_size = 398 sh->device_attr.max_rwq_indirection_table_size; 399 cqe_comp = 0; 400 config->cqe_comp = cqe_comp; 401 DRV_LOG(DEBUG, "tunnel offloading is not supported"); 402 config->tunnel_en = 0; 403 DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported"); 404 config->mpls_en = 0; 405 /* Allocate private eth device data. */ 406 priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE, 407 sizeof(*priv), 408 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 409 if (priv == NULL) { 410 DRV_LOG(ERR, "priv allocation failure"); 411 err = ENOMEM; 412 goto error; 413 } 414 priv->sh = sh; 415 priv->dev_port = spawn->phys_port; 416 priv->pci_dev = spawn->pci_dev; 417 priv->mtu = RTE_ETHER_MTU; 418 priv->mp_id.port_id = port_id; 419 strlcpy(priv->mp_id.name, MLX5_MP_NAME, RTE_MP_MAX_NAME_LEN); 420 priv->representor = !!switch_info->representor; 421 priv->master = !!switch_info->master; 422 priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID; 423 priv->vport_meta_tag = 0; 424 priv->vport_meta_mask = 0; 425 priv->pf_bond = spawn->pf_bond; 426 priv->vport_id = -1; 427 /* representor_id field keeps the unmodified VF index. */ 428 priv->representor_id = -1; 429 /* 430 * Look for sibling devices in order to reuse their switch domain 431 * if any, otherwise allocate one. 432 */ 433 MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { 434 const struct mlx5_priv *opriv = 435 rte_eth_devices[port_id].data->dev_private; 436 437 if (!opriv || 438 opriv->sh != priv->sh || 439 opriv->domain_id == 440 RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) 441 continue; 442 priv->domain_id = opriv->domain_id; 443 break; 444 } 445 if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { 446 err = rte_eth_switch_domain_alloc(&priv->domain_id); 447 if (err) { 448 err = rte_errno; 449 DRV_LOG(ERR, "unable to allocate switch domain: %s", 450 strerror(rte_errno)); 451 goto error; 452 } 453 own_domain_id = 1; 454 } 455 /* Override some values set by hardware configuration. */ 456 mlx5_args(config, dpdk_dev->devargs); 457 err = mlx5_dev_check_sibling_config(priv, config, dpdk_dev); 458 if (err) 459 goto error; 460 DRV_LOG(DEBUG, "counters are not supported"); 461 config->ind_table_max_size = 462 sh->device_attr.max_rwq_indirection_table_size; 463 /* 464 * Remove this check once DPDK supports larger/variable 465 * indirection tables. 466 */ 467 if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512) 468 config->ind_table_max_size = ETH_RSS_RETA_SIZE_512; 469 DRV_LOG(DEBUG, "maximum Rx indirection table size is %u", 470 config->ind_table_max_size); 471 DRV_LOG(DEBUG, "VLAN stripping is %ssupported", 472 (config->hw_vlan_strip ? "" : "not ")); 473 if (config->hw_padding) { 474 DRV_LOG(DEBUG, "Rx end alignment padding isn't supported"); 475 config->hw_padding = 0; 476 } 477 if (config->tso) 478 config->tso_max_payload_sz = sh->device_attr.max_tso; 479 DRV_LOG(DEBUG, "%sMPS is %s.", 480 config->mps == MLX5_MPW_ENHANCED ? "enhanced " : 481 config->mps == MLX5_MPW ? "legacy " : "", 482 config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled"); 483 if (config->cqe_comp && !cqe_comp) { 484 DRV_LOG(WARNING, "Rx CQE compression isn't supported."); 485 config->cqe_comp = 0; 486 } 487 if (config->devx) { 488 err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr); 489 if (err) { 490 err = -err; 491 goto error; 492 } 493 /* Check relax ordering support. */ 494 sh->cmng.relaxed_ordering_read = 0; 495 sh->cmng.relaxed_ordering_write = 0; 496 if (!haswell_broadwell_cpu) { 497 sh->cmng.relaxed_ordering_write = 498 config->hca_attr.relaxed_ordering_write; 499 sh->cmng.relaxed_ordering_read = 500 config->hca_attr.relaxed_ordering_read; 501 } 502 config->hw_csum = config->hca_attr.csum_cap; 503 DRV_LOG(DEBUG, "checksum offloading is %ssupported", 504 (config->hw_csum ? "" : "not ")); 505 } 506 if (config->devx) { 507 uint32_t reg[MLX5_ST_SZ_DW(register_mtutc)]; 508 509 err = config->hca_attr.access_register_user ? 510 mlx5_devx_cmd_register_read 511 (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0, 512 reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP; 513 if (!err) { 514 uint32_t ts_mode; 515 516 /* MTUTC register is read successfully. */ 517 ts_mode = MLX5_GET(register_mtutc, reg, 518 time_stamp_mode); 519 if (ts_mode == MLX5_MTUTC_TIMESTAMP_MODE_REAL_TIME) 520 config->rt_timestamp = 1; 521 } else { 522 /* Kernel does not support register reading. */ 523 if (config->hca_attr.dev_freq_khz == 524 (NS_PER_S / MS_PER_S)) 525 config->rt_timestamp = 1; 526 } 527 sh->rq_ts_format = config->hca_attr.rq_ts_format; 528 sh->sq_ts_format = config->hca_attr.sq_ts_format; 529 sh->qp_ts_format = config->hca_attr.qp_ts_format; 530 } 531 if (config->mprq.enabled) { 532 DRV_LOG(WARNING, "Multi-Packet RQ isn't supported"); 533 config->mprq.enabled = 0; 534 } 535 if (config->max_dump_files_num == 0) 536 config->max_dump_files_num = 128; 537 eth_dev = rte_eth_dev_allocate(name); 538 if (eth_dev == NULL) { 539 DRV_LOG(ERR, "can not allocate rte ethdev"); 540 err = ENOMEM; 541 goto error; 542 } 543 if (priv->representor) { 544 eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR; 545 eth_dev->data->representor_id = priv->representor_id; 546 } 547 /* 548 * Store associated network device interface index. This index 549 * is permanent throughout the lifetime of device. So, we may store 550 * the ifindex here and use the cached value further. 551 */ 552 MLX5_ASSERT(spawn->ifindex); 553 priv->if_index = spawn->ifindex; 554 eth_dev->data->dev_private = priv; 555 priv->dev_data = eth_dev->data; 556 eth_dev->data->mac_addrs = priv->mac; 557 eth_dev->device = dpdk_dev; 558 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 559 /* Configure the first MAC address by default. */ 560 if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) { 561 DRV_LOG(ERR, 562 "port %u cannot get MAC address, is mlx5_en" 563 " loaded? (errno: %s).", 564 eth_dev->data->port_id, strerror(rte_errno)); 565 err = ENODEV; 566 goto error; 567 } 568 DRV_LOG(INFO, 569 "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x", 570 eth_dev->data->port_id, 571 mac.addr_bytes[0], mac.addr_bytes[1], 572 mac.addr_bytes[2], mac.addr_bytes[3], 573 mac.addr_bytes[4], mac.addr_bytes[5]); 574 #ifdef RTE_LIBRTE_MLX5_DEBUG 575 { 576 char ifname[MLX5_NAMESIZE]; 577 578 if (mlx5_get_ifname(eth_dev, &ifname) == 0) 579 DRV_LOG(DEBUG, "port %u ifname is \"%s\"", 580 eth_dev->data->port_id, ifname); 581 else 582 DRV_LOG(DEBUG, "port %u ifname is unknown.", 583 eth_dev->data->port_id); 584 } 585 #endif 586 /* Get actual MTU if possible. */ 587 err = mlx5_get_mtu(eth_dev, &priv->mtu); 588 if (err) { 589 err = rte_errno; 590 goto error; 591 } 592 DRV_LOG(DEBUG, "port %u MTU is %u.", eth_dev->data->port_id, 593 priv->mtu); 594 /* Initialize burst functions to prevent crashes before link-up. */ 595 eth_dev->rx_pkt_burst = removed_rx_burst; 596 eth_dev->tx_pkt_burst = removed_tx_burst; 597 eth_dev->dev_ops = &mlx5_dev_ops; 598 eth_dev->rx_descriptor_status = mlx5_rx_descriptor_status; 599 eth_dev->tx_descriptor_status = mlx5_tx_descriptor_status; 600 eth_dev->rx_queue_count = mlx5_rx_queue_count; 601 /* Register MAC address. */ 602 claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0)); 603 priv->ctrl_flows = 0; 604 TAILQ_INIT(&priv->flow_meters); 605 priv->mtr_profile_tbl = mlx5_l3t_create(MLX5_L3T_TYPE_PTR); 606 if (!priv->mtr_profile_tbl) 607 goto error; 608 /* Bring Ethernet device up. */ 609 DRV_LOG(DEBUG, "port %u forcing Ethernet interface up.", 610 eth_dev->data->port_id); 611 /* nl calls are unsupported - set to -1 not to fail on release */ 612 priv->nl_socket_rdma = -1; 613 priv->nl_socket_route = -1; 614 mlx5_set_link_up(eth_dev); 615 /* 616 * Even though the interrupt handler is not installed yet, 617 * interrupts will still trigger on the async_fd from 618 * Verbs context returned by ibv_open_device(). 619 */ 620 mlx5_link_update(eth_dev, 0); 621 config->dv_esw_en = 0; 622 /* Detect minimal data bytes to inline. */ 623 mlx5_set_min_inline(spawn, config); 624 /* Store device configuration on private structure. */ 625 priv->config = *config; 626 for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) { 627 icfg[i].release_mem_en = !!config->reclaim_mode; 628 if (config->reclaim_mode) 629 icfg[i].per_core_cache = 0; 630 priv->flows[i] = mlx5_ipool_create(&icfg[i]); 631 if (!priv->flows[i]) 632 goto error; 633 } 634 /* Create context for virtual machine VLAN workaround. */ 635 priv->vmwa_context = NULL; 636 if (config->dv_flow_en) { 637 err = mlx5_alloc_shared_dr(priv); 638 if (err) 639 goto error; 640 } 641 /* No supported flow priority number detection. */ 642 priv->config.flow_prio = -1; 643 if (!priv->config.dv_esw_en && 644 priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 645 DRV_LOG(WARNING, "metadata mode %u is not supported " 646 "(no E-Switch)", priv->config.dv_xmeta_en); 647 priv->config.dv_xmeta_en = MLX5_XMETA_MODE_LEGACY; 648 } 649 mlx5_set_metadata_mask(eth_dev); 650 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY && 651 !priv->sh->dv_regc0_mask) { 652 DRV_LOG(ERR, "metadata mode %u is not supported " 653 "(no metadata reg_c[0] is available).", 654 priv->config.dv_xmeta_en); 655 err = ENOTSUP; 656 goto error; 657 } 658 priv->hrxqs = mlx5_list_create("hrxq", eth_dev, true, 659 mlx5_hrxq_create_cb, mlx5_hrxq_match_cb, 660 mlx5_hrxq_remove_cb, mlx5_hrxq_clone_cb, 661 mlx5_hrxq_clone_free_cb); 662 /* Query availability of metadata reg_c's. */ 663 err = mlx5_flow_discover_mreg_c(eth_dev); 664 if (err < 0) { 665 err = -err; 666 goto error; 667 } 668 if (!mlx5_flow_ext_mreg_supported(eth_dev)) { 669 DRV_LOG(DEBUG, 670 "port %u extensive metadata register is not supported.", 671 eth_dev->data->port_id); 672 if (priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) { 673 DRV_LOG(ERR, "metadata mode %u is not supported " 674 "(no metadata registers available).", 675 priv->config.dv_xmeta_en); 676 err = ENOTSUP; 677 goto error; 678 } 679 } 680 if (config->devx && config->dv_flow_en) { 681 priv->obj_ops = devx_obj_ops; 682 } else { 683 DRV_LOG(ERR, "Flow mode %u is not supported " 684 "(Windows flow must be DevX with DV flow enabled).", 685 priv->config.dv_flow_en); 686 err = ENOTSUP; 687 goto error; 688 } 689 mlx5_flow_counter_mode_config(eth_dev); 690 return eth_dev; 691 error: 692 if (priv) { 693 if (priv->mtr_profile_tbl) 694 mlx5_l3t_destroy(priv->mtr_profile_tbl); 695 if (own_domain_id) 696 claim_zero(rte_eth_switch_domain_free(priv->domain_id)); 697 mlx5_free(priv); 698 if (eth_dev != NULL) 699 eth_dev->data->dev_private = NULL; 700 } 701 if (eth_dev != NULL) { 702 /* mac_addrs must not be freed alone because part of 703 * dev_private 704 **/ 705 eth_dev->data->mac_addrs = NULL; 706 rte_eth_dev_release_port(eth_dev); 707 } 708 if (sh) 709 mlx5_free_shared_dev_ctx(sh); 710 MLX5_ASSERT(err > 0); 711 rte_errno = err; 712 return NULL; 713 } 714 715 /** 716 * This function should share events between multiple ports of single IB 717 * device. Currently it has no support under Windows. 718 * 719 * @param sh 720 * Pointer to mlx5_dev_ctx_shared object. 721 */ 722 void 723 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh) 724 { 725 (void)sh; 726 DRV_LOG(WARNING, "%s: is not supported", __func__); 727 } 728 729 /** 730 * This function should share events between multiple ports of single IB 731 * device. Currently it has no support under Windows. 732 * 733 * @param dev 734 * Pointer to mlx5_dev_ctx_shared object. 735 */ 736 void 737 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh) 738 { 739 (void)sh; 740 DRV_LOG(WARNING, "%s: is not supported", __func__); 741 } 742 743 /** 744 * Read statistics by a named counter. 745 * 746 * @param[in] priv 747 * Pointer to the private device data structure. 748 * @param[in] ctr_name 749 * Pointer to the name of the statistic counter to read 750 * @param[out] stat 751 * Pointer to read statistic value. 752 * @return 753 * 0 on success and stat is valud, 1 if failed to read the value 754 * rte_errno is set. 755 * 756 */ 757 int 758 mlx5_os_read_dev_stat(struct mlx5_priv *priv, const char *ctr_name, 759 uint64_t *stat) 760 { 761 RTE_SET_USED(priv); 762 RTE_SET_USED(ctr_name); 763 RTE_SET_USED(stat); 764 DRV_LOG(WARNING, "%s: is not supported", __func__); 765 return -ENOTSUP; 766 } 767 768 /** 769 * Flush device MAC addresses 770 * Currently it has no support under Windows. 771 * 772 * @param dev 773 * Pointer to Ethernet device structure. 774 * 775 */ 776 void 777 mlx5_os_mac_addr_flush(struct rte_eth_dev *dev) 778 { 779 (void)dev; 780 DRV_LOG(WARNING, "%s: is not supported", __func__); 781 } 782 783 /** 784 * Remove a MAC address from device 785 * Currently it has no support under Windows. 786 * 787 * @param dev 788 * Pointer to Ethernet device structure. 789 * @param index 790 * MAC address index. 791 */ 792 void 793 mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) 794 { 795 (void)dev; 796 (void)(index); 797 DRV_LOG(WARNING, "%s: is not supported", __func__); 798 } 799 800 /** 801 * Adds a MAC address to the device 802 * Currently it has no support under Windows. 803 * 804 * @param dev 805 * Pointer to Ethernet device structure. 806 * @param mac_addr 807 * MAC address to register. 808 * @param index 809 * MAC address index. 810 * 811 * @return 812 * 0 on success, a negative errno value otherwise 813 */ 814 int 815 mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac, 816 uint32_t index) 817 { 818 (void)index; 819 struct rte_ether_addr lmac; 820 821 if (mlx5_get_mac(dev, &lmac.addr_bytes)) { 822 DRV_LOG(ERR, 823 "port %u cannot get MAC address, is mlx5_en" 824 " loaded? (errno: %s)", 825 dev->data->port_id, strerror(rte_errno)); 826 return rte_errno; 827 } 828 if (!rte_is_same_ether_addr(&lmac, mac)) { 829 DRV_LOG(ERR, 830 "adding new mac address to device is unsupported"); 831 return -ENOTSUP; 832 } 833 return 0; 834 } 835 836 /** 837 * Modify a VF MAC address 838 * Currently it has no support under Windows. 839 * 840 * @param priv 841 * Pointer to device private data. 842 * @param mac_addr 843 * MAC address to modify into. 844 * @param iface_idx 845 * Net device interface index 846 * @param vf_index 847 * VF index 848 * 849 * @return 850 * 0 on success, a negative errno value otherwise 851 */ 852 int 853 mlx5_os_vf_mac_addr_modify(struct mlx5_priv *priv, 854 unsigned int iface_idx, 855 struct rte_ether_addr *mac_addr, 856 int vf_index) 857 { 858 (void)priv; 859 (void)iface_idx; 860 (void)mac_addr; 861 (void)vf_index; 862 DRV_LOG(WARNING, "%s: is not supported", __func__); 863 return -ENOTSUP; 864 } 865 866 /** 867 * Set device promiscuous mode 868 * Currently it has no support under Windows. 869 * 870 * @param dev 871 * Pointer to Ethernet device structure. 872 * @param enable 873 * 0 - promiscuous is disabled, otherwise - enabled 874 * 875 * @return 876 * 0 on success, a negative error value otherwise 877 */ 878 int 879 mlx5_os_set_promisc(struct rte_eth_dev *dev, int enable) 880 { 881 (void)dev; 882 (void)enable; 883 DRV_LOG(WARNING, "%s: is not supported", __func__); 884 return -ENOTSUP; 885 } 886 887 /** 888 * Set device allmulti mode 889 * 890 * @param dev 891 * Pointer to Ethernet device structure. 892 * @param enable 893 * 0 - all multicase is disabled, otherwise - enabled 894 * 895 * @return 896 * 0 on success, a negative error value otherwise 897 */ 898 int 899 mlx5_os_set_allmulti(struct rte_eth_dev *dev, int enable) 900 { 901 (void)dev; 902 (void)enable; 903 DRV_LOG(WARNING, "%s: is not supported", __func__); 904 return -ENOTSUP; 905 } 906 907 /** 908 * Detect if a devx_device_bdf object has identical DBDF values to the 909 * rte_pci_addr found in bus/pci probing 910 * 911 * @param[in] devx_bdf 912 * Pointer to the devx_device_bdf structure. 913 * @param[in] addr 914 * Pointer to the rte_pci_addr structure. 915 * 916 * @return 917 * 1 on Device match, 0 on mismatch. 918 */ 919 static int 920 mlx5_match_devx_bdf_to_addr(struct devx_device_bdf *devx_bdf, 921 struct rte_pci_addr *addr) 922 { 923 if (addr->domain != (devx_bdf->bus_id >> 8) || 924 addr->bus != (devx_bdf->bus_id & 0xff) || 925 addr->devid != devx_bdf->dev_id || 926 addr->function != devx_bdf->fnc_id) { 927 return 0; 928 } 929 return 1; 930 } 931 932 /** 933 * Detect if a devx_device_bdf object matches the rte_pci_addr 934 * found in bus/pci probing 935 * Compare both the Native/PF BDF and the raw_bdf representing a VF BDF. 936 * 937 * @param[in] devx_bdf 938 * Pointer to the devx_device_bdf structure. 939 * @param[in] addr 940 * Pointer to the rte_pci_addr structure. 941 * 942 * @return 943 * 1 on Device match, 0 on mismatch, rte_errno code on failure. 944 */ 945 static int 946 mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf, 947 struct rte_pci_addr *addr) 948 { 949 int err; 950 struct devx_device mlx5_dev; 951 952 if (mlx5_match_devx_bdf_to_addr(devx_bdf, addr)) 953 return 1; 954 /** 955 * Didn't match on Native/PF BDF, could still 956 * Match a VF BDF, check it next 957 */ 958 err = mlx5_glue->query_device(devx_bdf, &mlx5_dev); 959 if (err) { 960 DRV_LOG(ERR, "query_device failed"); 961 rte_errno = err; 962 return rte_errno; 963 } 964 if (mlx5_match_devx_bdf_to_addr(&mlx5_dev.raw_bdf, addr)) 965 return 1; 966 return 0; 967 } 968 969 /** 970 * DPDK callback to register a PCI device. 971 * 972 * This function spawns Ethernet devices out of a given device. 973 * 974 * @param[in] dev 975 * Pointer to the generic device. 976 * 977 * @return 978 * 0 on success, a negative errno value otherwise and rte_errno is set. 979 */ 980 int 981 mlx5_os_net_probe(struct rte_device *dev) 982 { 983 struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev); 984 struct devx_device_bdf *devx_bdf_devs, *orig_devx_bdf_devs; 985 /* 986 * Number of found IB Devices matching with requested PCI BDF. 987 * nd != 1 means there are multiple IB devices over the same 988 * PCI device and we have representors and master. 989 */ 990 unsigned int nd = 0; 991 /* 992 * Number of found IB device Ports. nd = 1 and np = 1..n means 993 * we have the single multiport IB device, and there may be 994 * representors attached to some of found ports. 995 * Currently not supported. 996 * unsigned int np = 0; 997 */ 998 999 /* 1000 * Number of DPDK ethernet devices to Spawn - either over 1001 * multiple IB devices or multiple ports of single IB device. 1002 * Actually this is the number of iterations to spawn. 1003 */ 1004 unsigned int ns = 0; 1005 /* 1006 * Bonding device 1007 * < 0 - no bonding device (single one) 1008 * >= 0 - bonding device (value is slave PF index) 1009 */ 1010 int bd = -1; 1011 struct mlx5_dev_spawn_data *list = NULL; 1012 struct mlx5_dev_config dev_config; 1013 unsigned int dev_config_vf; 1014 int ret, err; 1015 uint32_t restore; 1016 1017 if (rte_eal_process_type() == RTE_PROC_SECONDARY) { 1018 DRV_LOG(ERR, "Secondary process is not supported on Windows."); 1019 return -ENOTSUP; 1020 } 1021 ret = mlx5_init_once(); 1022 if (ret) { 1023 DRV_LOG(ERR, "unable to init PMD global data: %s", 1024 strerror(rte_errno)); 1025 return -rte_errno; 1026 } 1027 errno = 0; 1028 devx_bdf_devs = mlx5_glue->get_device_list(&ret); 1029 orig_devx_bdf_devs = devx_bdf_devs; 1030 if (!devx_bdf_devs) { 1031 rte_errno = errno ? errno : ENOSYS; 1032 DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?"); 1033 return -rte_errno; 1034 } 1035 /* 1036 * First scan the list of all Infiniband devices to find 1037 * matching ones, gathering into the list. 1038 */ 1039 struct devx_device_bdf *devx_bdf_match[ret + 1]; 1040 1041 while (ret-- > 0) { 1042 err = mlx5_match_devx_devices_to_addr(devx_bdf_devs, 1043 &pci_dev->addr); 1044 if (!err) { 1045 devx_bdf_devs++; 1046 continue; 1047 } 1048 if (err != 1) { 1049 ret = -err; 1050 goto exit; 1051 } 1052 devx_bdf_match[nd++] = devx_bdf_devs; 1053 } 1054 devx_bdf_match[nd] = NULL; 1055 if (!nd) { 1056 /* No device matches, just complain and bail out. */ 1057 DRV_LOG(WARNING, 1058 "no DevX device matches PCI device " PCI_PRI_FMT "," 1059 " is DevX Configured?", 1060 pci_dev->addr.domain, pci_dev->addr.bus, 1061 pci_dev->addr.devid, pci_dev->addr.function); 1062 rte_errno = ENOENT; 1063 ret = -rte_errno; 1064 goto exit; 1065 } 1066 /* 1067 * Now we can determine the maximal 1068 * amount of devices to be spawned. 1069 */ 1070 list = mlx5_malloc(MLX5_MEM_ZERO, 1071 sizeof(struct mlx5_dev_spawn_data), 1072 RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); 1073 if (!list) { 1074 DRV_LOG(ERR, "spawn data array allocation failure"); 1075 rte_errno = ENOMEM; 1076 ret = -rte_errno; 1077 goto exit; 1078 } 1079 memset(&list[ns].info, 0, sizeof(list[ns].info)); 1080 list[ns].max_port = 1; 1081 list[ns].phys_port = 1; 1082 list[ns].phys_dev = devx_bdf_match[ns]; 1083 list[ns].eth_dev = NULL; 1084 list[ns].pci_dev = pci_dev; 1085 list[ns].pf_bond = bd; 1086 list[ns].ifindex = -1; /* Spawn will assign */ 1087 list[ns].info = 1088 (struct mlx5_switch_info){ 1089 .master = 0, 1090 .representor = 0, 1091 .name_type = MLX5_PHYS_PORT_NAME_TYPE_UPLINK, 1092 .port_name = 0, 1093 .switch_id = 0, 1094 }; 1095 /* Device specific configuration. */ 1096 switch (pci_dev->id.device_id) { 1097 case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: 1098 case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF: 1099 case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF: 1100 case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF: 1101 case PCI_DEVICE_ID_MELLANOX_CONNECTX5BFVF: 1102 case PCI_DEVICE_ID_MELLANOX_CONNECTX6VF: 1103 case PCI_DEVICE_ID_MELLANOX_CONNECTXVF: 1104 dev_config_vf = 1; 1105 break; 1106 default: 1107 dev_config_vf = 0; 1108 break; 1109 } 1110 /* Default configuration. */ 1111 memset(&dev_config, 0, sizeof(struct mlx5_dev_config)); 1112 dev_config.vf = dev_config_vf; 1113 dev_config.mps = 0; 1114 dev_config.dbnc = MLX5_ARG_UNSET; 1115 dev_config.rx_vec_en = 1; 1116 dev_config.txq_inline_max = MLX5_ARG_UNSET; 1117 dev_config.txq_inline_min = MLX5_ARG_UNSET; 1118 dev_config.txq_inline_mpw = MLX5_ARG_UNSET; 1119 dev_config.txqs_inline = MLX5_ARG_UNSET; 1120 dev_config.vf_nl_en = 0; 1121 dev_config.mr_ext_memseg_en = 1; 1122 dev_config.mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN; 1123 dev_config.mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS; 1124 dev_config.dv_esw_en = 0; 1125 dev_config.dv_flow_en = 1; 1126 dev_config.decap_en = 0; 1127 dev_config.log_hp_size = MLX5_ARG_UNSET; 1128 list[ns].numa_node = pci_dev->device.numa_node; 1129 list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device, 1130 &list[ns], 1131 &dev_config); 1132 if (!list[ns].eth_dev) 1133 goto exit; 1134 restore = list[ns].eth_dev->data->dev_flags; 1135 rte_eth_copy_pci_info(list[ns].eth_dev, pci_dev); 1136 /* Restore non-PCI flags cleared by the above call. */ 1137 list[ns].eth_dev->data->dev_flags |= restore; 1138 rte_eth_dev_probing_finish(list[ns].eth_dev); 1139 ret = 0; 1140 exit: 1141 /* 1142 * Do the routine cleanup: 1143 * - free allocated spawn data array 1144 * - free the device list 1145 */ 1146 if (list) 1147 mlx5_free(list); 1148 MLX5_ASSERT(orig_devx_bdf_devs); 1149 mlx5_glue->free_device_list(orig_devx_bdf_devs); 1150 return ret; 1151 } 1152 1153 /** 1154 * Set the reg_mr and dereg_mr call backs 1155 * 1156 * @param reg_mr_cb[out] 1157 * Pointer to reg_mr func 1158 * @param dereg_mr_cb[out] 1159 * Pointer to dereg_mr func 1160 * 1161 */ 1162 void 1163 mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, 1164 mlx5_dereg_mr_t *dereg_mr_cb) 1165 { 1166 *reg_mr_cb = mlx5_os_reg_mr; 1167 *dereg_mr_cb = mlx5_os_dereg_mr; 1168 } 1169 1170 /** 1171 * Extract pdn of PD object using DevX 1172 * 1173 * @param[in] pd 1174 * Pointer to the DevX PD object. 1175 * @param[out] pdn 1176 * Pointer to the PD object number variable. 1177 * 1178 * @return 1179 * 0 on success, error value otherwise. 1180 */ 1181 int 1182 mlx5_os_get_pdn(void *pd, uint32_t *pdn) 1183 { 1184 if (!pd) 1185 return -EINVAL; 1186 1187 *pdn = ((struct mlx5_pd *)pd)->pdn; 1188 return 0; 1189 } 1190 1191 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0}; 1192