1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 4 */ 5 6 #include <stdio.h> 7 #include <stdint.h> 8 9 #include <rte_dev.h> 10 #include <rte_pci.h> 11 #include <rte_bus_pci.h> 12 #include <rte_ethdev_driver.h> 13 #include <rte_ethdev_pci.h> 14 #include <rte_kvargs.h> 15 #include <rte_string_fns.h> 16 17 #include "vnic_intr.h" 18 #include "vnic_cq.h" 19 #include "vnic_wq.h" 20 #include "vnic_rq.h" 21 #include "vnic_enet.h" 22 #include "enic.h" 23 24 int enic_pmd_logtype; 25 26 /* 27 * The set of PCI devices this driver supports 28 */ 29 #define CISCO_PCI_VENDOR_ID 0x1137 30 static const struct rte_pci_id pci_id_enic_map[] = { 31 { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 32 { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, 33 {.vendor_id = 0, /* sentinel */}, 34 }; 35 36 /* Supported link speeds of production VIC models */ 37 static const struct vic_speed_capa { 38 uint16_t sub_devid; 39 uint32_t capa; 40 } vic_speed_capa_map[] = { 41 { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */ 42 { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */ 43 { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */ 44 { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */ 45 { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */ 46 { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */ 47 { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */ 48 { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */ 49 { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */ 50 { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */ 51 { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */ 52 { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */ 53 { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */ 54 { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */ 55 { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | 56 ETH_LINK_SPEED_40G }, /* 1440 Mezz */ 57 { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | 58 ETH_LINK_SPEED_40G }, /* 1480 MLOM */ 59 { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */ 60 { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */ 61 { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */ 62 { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */ 63 { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */ 64 { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */ 65 { 0, 0 }, /* End marker */ 66 }; 67 68 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay" 69 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx" 70 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite" 71 72 RTE_INIT(enicpmd_init_log) 73 { 74 enic_pmd_logtype = rte_log_register("pmd.net.enic"); 75 if (enic_pmd_logtype >= 0) 76 rte_log_set_level(enic_pmd_logtype, RTE_LOG_INFO); 77 } 78 79 static int 80 enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev, 81 enum rte_filter_op filter_op, void *arg) 82 { 83 struct enic *enic = pmd_priv(eth_dev); 84 int ret = 0; 85 86 ENICPMD_FUNC_TRACE(); 87 if (filter_op == RTE_ETH_FILTER_NOP) 88 return 0; 89 90 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH) 91 return -EINVAL; 92 93 switch (filter_op) { 94 case RTE_ETH_FILTER_ADD: 95 case RTE_ETH_FILTER_UPDATE: 96 ret = enic_fdir_add_fltr(enic, 97 (struct rte_eth_fdir_filter *)arg); 98 break; 99 100 case RTE_ETH_FILTER_DELETE: 101 ret = enic_fdir_del_fltr(enic, 102 (struct rte_eth_fdir_filter *)arg); 103 break; 104 105 case RTE_ETH_FILTER_STATS: 106 enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg); 107 break; 108 109 case RTE_ETH_FILTER_FLUSH: 110 dev_warning(enic, "unsupported operation %u", filter_op); 111 ret = -ENOTSUP; 112 break; 113 case RTE_ETH_FILTER_INFO: 114 enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg); 115 break; 116 default: 117 dev_err(enic, "unknown operation %u", filter_op); 118 ret = -EINVAL; 119 break; 120 } 121 return ret; 122 } 123 124 static int 125 enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev, 126 enum rte_filter_type filter_type, 127 enum rte_filter_op filter_op, 128 void *arg) 129 { 130 int ret = 0; 131 132 ENICPMD_FUNC_TRACE(); 133 134 switch (filter_type) { 135 case RTE_ETH_FILTER_GENERIC: 136 if (filter_op != RTE_ETH_FILTER_GET) 137 return -EINVAL; 138 *(const void **)arg = &enic_flow_ops; 139 break; 140 case RTE_ETH_FILTER_FDIR: 141 ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg); 142 break; 143 default: 144 dev_warning(enic, "Filter type (%d) not supported", 145 filter_type); 146 ret = -EINVAL; 147 break; 148 } 149 150 return ret; 151 } 152 153 static void enicpmd_dev_tx_queue_release(void *txq) 154 { 155 ENICPMD_FUNC_TRACE(); 156 157 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 158 return; 159 160 enic_free_wq(txq); 161 } 162 163 static int enicpmd_dev_setup_intr(struct enic *enic) 164 { 165 int ret; 166 unsigned int index; 167 168 ENICPMD_FUNC_TRACE(); 169 170 /* Are we done with the init of all the queues? */ 171 for (index = 0; index < enic->cq_count; index++) { 172 if (!enic->cq[index].ctrl) 173 break; 174 } 175 if (enic->cq_count != index) 176 return 0; 177 for (index = 0; index < enic->wq_count; index++) { 178 if (!enic->wq[index].ctrl) 179 break; 180 } 181 if (enic->wq_count != index) 182 return 0; 183 /* check start of packet (SOP) RQs only in case scatter is disabled. */ 184 for (index = 0; index < enic->rq_count; index++) { 185 if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl) 186 break; 187 } 188 if (enic->rq_count != index) 189 return 0; 190 191 ret = enic_alloc_intr_resources(enic); 192 if (ret) { 193 dev_err(enic, "alloc intr failed\n"); 194 return ret; 195 } 196 enic_init_vnic_resources(enic); 197 198 ret = enic_setup_finish(enic); 199 if (ret) 200 dev_err(enic, "setup could not be finished\n"); 201 202 return ret; 203 } 204 205 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, 206 uint16_t queue_idx, 207 uint16_t nb_desc, 208 unsigned int socket_id, 209 const struct rte_eth_txconf *tx_conf) 210 { 211 int ret; 212 struct enic *enic = pmd_priv(eth_dev); 213 struct vnic_wq *wq; 214 215 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 216 return -E_RTE_SECONDARY; 217 218 ENICPMD_FUNC_TRACE(); 219 RTE_ASSERT(queue_idx < enic->conf_wq_count); 220 wq = &enic->wq[queue_idx]; 221 wq->offloads = tx_conf->offloads | 222 eth_dev->data->dev_conf.txmode.offloads; 223 eth_dev->data->tx_queues[queue_idx] = (void *)wq; 224 225 ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); 226 if (ret) { 227 dev_err(enic, "error in allocating wq\n"); 228 return ret; 229 } 230 231 return enicpmd_dev_setup_intr(enic); 232 } 233 234 static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev, 235 uint16_t queue_idx) 236 { 237 struct enic *enic = pmd_priv(eth_dev); 238 239 ENICPMD_FUNC_TRACE(); 240 241 enic_start_wq(enic, queue_idx); 242 243 return 0; 244 } 245 246 static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, 247 uint16_t queue_idx) 248 { 249 int ret; 250 struct enic *enic = pmd_priv(eth_dev); 251 252 ENICPMD_FUNC_TRACE(); 253 254 ret = enic_stop_wq(enic, queue_idx); 255 if (ret) 256 dev_err(enic, "error in stopping wq %d\n", queue_idx); 257 258 return ret; 259 } 260 261 static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev, 262 uint16_t queue_idx) 263 { 264 struct enic *enic = pmd_priv(eth_dev); 265 266 ENICPMD_FUNC_TRACE(); 267 268 enic_start_rq(enic, queue_idx); 269 270 return 0; 271 } 272 273 static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, 274 uint16_t queue_idx) 275 { 276 int ret; 277 struct enic *enic = pmd_priv(eth_dev); 278 279 ENICPMD_FUNC_TRACE(); 280 281 ret = enic_stop_rq(enic, queue_idx); 282 if (ret) 283 dev_err(enic, "error in stopping rq %d\n", queue_idx); 284 285 return ret; 286 } 287 288 static void enicpmd_dev_rx_queue_release(void *rxq) 289 { 290 ENICPMD_FUNC_TRACE(); 291 292 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 293 return; 294 295 enic_free_rq(rxq); 296 } 297 298 static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev, 299 uint16_t rx_queue_id) 300 { 301 struct enic *enic = pmd_priv(dev); 302 uint32_t queue_count = 0; 303 struct vnic_cq *cq; 304 uint32_t cq_tail; 305 uint16_t cq_idx; 306 int rq_num; 307 308 rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id); 309 cq = &enic->cq[enic_cq_rq(enic, rq_num)]; 310 cq_idx = cq->to_clean; 311 312 cq_tail = ioread32(&cq->ctrl->cq_tail); 313 314 if (cq_tail < cq_idx) 315 cq_tail += cq->ring.desc_count; 316 317 queue_count = cq_tail - cq_idx; 318 319 return queue_count; 320 } 321 322 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 323 uint16_t queue_idx, 324 uint16_t nb_desc, 325 unsigned int socket_id, 326 const struct rte_eth_rxconf *rx_conf, 327 struct rte_mempool *mp) 328 { 329 int ret; 330 struct enic *enic = pmd_priv(eth_dev); 331 332 ENICPMD_FUNC_TRACE(); 333 334 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 335 return -E_RTE_SECONDARY; 336 RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count); 337 eth_dev->data->rx_queues[queue_idx] = 338 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)]; 339 340 ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc, 341 rx_conf->rx_free_thresh); 342 if (ret) { 343 dev_err(enic, "error in allocating rq\n"); 344 return ret; 345 } 346 347 return enicpmd_dev_setup_intr(enic); 348 } 349 350 static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 351 { 352 struct enic *enic = pmd_priv(eth_dev); 353 uint64_t offloads; 354 355 ENICPMD_FUNC_TRACE(); 356 357 offloads = eth_dev->data->dev_conf.rxmode.offloads; 358 if (mask & ETH_VLAN_STRIP_MASK) { 359 if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 360 enic->ig_vlan_strip_en = 1; 361 else 362 enic->ig_vlan_strip_en = 0; 363 } 364 365 if ((mask & ETH_VLAN_FILTER_MASK) && 366 (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) { 367 dev_warning(enic, 368 "Configuration of VLAN filter is not supported\n"); 369 } 370 371 if ((mask & ETH_VLAN_EXTEND_MASK) && 372 (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) { 373 dev_warning(enic, 374 "Configuration of extended VLAN is not supported\n"); 375 } 376 377 return enic_set_vlan_strip(enic); 378 } 379 380 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) 381 { 382 int ret; 383 int mask; 384 struct enic *enic = pmd_priv(eth_dev); 385 386 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 387 return -E_RTE_SECONDARY; 388 389 ENICPMD_FUNC_TRACE(); 390 ret = enic_set_vnic_res(enic); 391 if (ret) { 392 dev_err(enic, "Set vNIC resource num failed, aborting\n"); 393 return ret; 394 } 395 396 enic->mc_count = 0; 397 enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads & 398 DEV_RX_OFFLOAD_CHECKSUM); 399 /* All vlan offload masks to apply the current settings */ 400 mask = ETH_VLAN_STRIP_MASK | 401 ETH_VLAN_FILTER_MASK | 402 ETH_VLAN_EXTEND_MASK; 403 ret = enicpmd_vlan_offload_set(eth_dev, mask); 404 if (ret) { 405 dev_err(enic, "Failed to configure VLAN offloads\n"); 406 return ret; 407 } 408 /* 409 * Initialize RSS with the default reta and key. If the user key is 410 * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the 411 * default key. 412 */ 413 return enic_init_rss_nic_cfg(enic); 414 } 415 416 /* Start the device. 417 * It returns 0 on success. 418 */ 419 static int enicpmd_dev_start(struct rte_eth_dev *eth_dev) 420 { 421 struct enic *enic = pmd_priv(eth_dev); 422 423 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 424 return -E_RTE_SECONDARY; 425 426 ENICPMD_FUNC_TRACE(); 427 return enic_enable(enic); 428 } 429 430 /* 431 * Stop device: disable rx and tx functions to allow for reconfiguring. 432 */ 433 static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) 434 { 435 struct rte_eth_link link; 436 struct enic *enic = pmd_priv(eth_dev); 437 438 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 439 return; 440 441 ENICPMD_FUNC_TRACE(); 442 enic_disable(enic); 443 444 memset(&link, 0, sizeof(link)); 445 rte_eth_linkstatus_set(eth_dev, &link); 446 } 447 448 /* 449 * Stop device. 450 */ 451 static void enicpmd_dev_close(struct rte_eth_dev *eth_dev) 452 { 453 struct enic *enic = pmd_priv(eth_dev); 454 455 ENICPMD_FUNC_TRACE(); 456 enic_remove(enic); 457 } 458 459 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev, 460 __rte_unused int wait_to_complete) 461 { 462 struct enic *enic = pmd_priv(eth_dev); 463 464 ENICPMD_FUNC_TRACE(); 465 return enic_link_update(enic); 466 } 467 468 static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev, 469 struct rte_eth_stats *stats) 470 { 471 struct enic *enic = pmd_priv(eth_dev); 472 473 ENICPMD_FUNC_TRACE(); 474 return enic_dev_stats_get(enic, stats); 475 } 476 477 static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev) 478 { 479 struct enic *enic = pmd_priv(eth_dev); 480 481 ENICPMD_FUNC_TRACE(); 482 return enic_dev_stats_clear(enic); 483 } 484 485 static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev) 486 { 487 const struct vic_speed_capa *m; 488 struct rte_pci_device *pdev; 489 uint16_t id; 490 491 pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 492 id = pdev->id.subsystem_device_id; 493 for (m = vic_speed_capa_map; m->sub_devid != 0; m++) { 494 if (m->sub_devid == id) 495 return m->capa; 496 } 497 /* 1300 and later models are at least 40G */ 498 if (id >= 0x0100) 499 return ETH_LINK_SPEED_40G; 500 return ETH_LINK_SPEED_10G; 501 } 502 503 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, 504 struct rte_eth_dev_info *device_info) 505 { 506 struct enic *enic = pmd_priv(eth_dev); 507 508 ENICPMD_FUNC_TRACE(); 509 /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ 510 device_info->max_rx_queues = enic->conf_rq_count / 2; 511 device_info->max_tx_queues = enic->conf_wq_count; 512 device_info->min_rx_bufsize = ENIC_MIN_MTU; 513 /* "Max" mtu is not a typo. HW receives packet sizes up to the 514 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is 515 * a hint to the driver to size receive buffers accordingly so that 516 * larger-than-vnic-mtu packets get truncated.. For DPDK, we let 517 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically 518 * ignoring vNIC mtu. 519 */ 520 device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu); 521 device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS; 522 device_info->min_mtu = ENIC_MIN_MTU; 523 device_info->max_mtu = enic->max_mtu; 524 device_info->rx_offload_capa = enic->rx_offload_capa; 525 device_info->tx_offload_capa = enic->tx_offload_capa; 526 device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa; 527 device_info->default_rxconf = (struct rte_eth_rxconf) { 528 .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH 529 }; 530 device_info->reta_size = enic->reta_size; 531 device_info->hash_key_size = enic->hash_key_size; 532 device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads; 533 device_info->rx_desc_lim = (struct rte_eth_desc_lim) { 534 .nb_max = enic->config.rq_desc_count, 535 .nb_min = ENIC_MIN_RQ_DESCS, 536 .nb_align = ENIC_ALIGN_DESCS, 537 }; 538 device_info->tx_desc_lim = (struct rte_eth_desc_lim) { 539 .nb_max = enic->config.wq_desc_count, 540 .nb_min = ENIC_MIN_WQ_DESCS, 541 .nb_align = ENIC_ALIGN_DESCS, 542 .nb_seg_max = ENIC_TX_XMIT_MAX, 543 .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC, 544 }; 545 device_info->default_rxportconf = (struct rte_eth_dev_portconf) { 546 .burst_size = ENIC_DEFAULT_RX_BURST, 547 .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max, 548 ENIC_DEFAULT_RX_RING_SIZE), 549 .nb_queues = ENIC_DEFAULT_RX_RINGS, 550 }; 551 device_info->default_txportconf = (struct rte_eth_dev_portconf) { 552 .burst_size = ENIC_DEFAULT_TX_BURST, 553 .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max, 554 ENIC_DEFAULT_TX_RING_SIZE), 555 .nb_queues = ENIC_DEFAULT_TX_RINGS, 556 }; 557 device_info->speed_capa = speed_capa_from_pci_id(eth_dev); 558 559 return 0; 560 } 561 562 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) 563 { 564 static const uint32_t ptypes[] = { 565 RTE_PTYPE_L2_ETHER, 566 RTE_PTYPE_L2_ETHER_VLAN, 567 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 568 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 569 RTE_PTYPE_L4_TCP, 570 RTE_PTYPE_L4_UDP, 571 RTE_PTYPE_L4_FRAG, 572 RTE_PTYPE_L4_NONFRAG, 573 RTE_PTYPE_UNKNOWN 574 }; 575 static const uint32_t ptypes_overlay[] = { 576 RTE_PTYPE_L2_ETHER, 577 RTE_PTYPE_L2_ETHER_VLAN, 578 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 579 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 580 RTE_PTYPE_L4_TCP, 581 RTE_PTYPE_L4_UDP, 582 RTE_PTYPE_L4_FRAG, 583 RTE_PTYPE_L4_NONFRAG, 584 RTE_PTYPE_TUNNEL_GRENAT, 585 RTE_PTYPE_INNER_L2_ETHER, 586 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 587 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 588 RTE_PTYPE_INNER_L4_TCP, 589 RTE_PTYPE_INNER_L4_UDP, 590 RTE_PTYPE_INNER_L4_FRAG, 591 RTE_PTYPE_INNER_L4_NONFRAG, 592 RTE_PTYPE_UNKNOWN 593 }; 594 595 if (dev->rx_pkt_burst != enic_dummy_recv_pkts && 596 dev->rx_pkt_burst != NULL) { 597 struct enic *enic = pmd_priv(dev); 598 if (enic->overlay_offload) 599 return ptypes_overlay; 600 else 601 return ptypes; 602 } 603 return NULL; 604 } 605 606 static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 607 { 608 struct enic *enic = pmd_priv(eth_dev); 609 int ret; 610 611 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 612 return -E_RTE_SECONDARY; 613 614 ENICPMD_FUNC_TRACE(); 615 616 enic->promisc = 1; 617 ret = enic_add_packet_filter(enic); 618 if (ret != 0) 619 enic->promisc = 0; 620 621 return ret; 622 } 623 624 static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 625 { 626 struct enic *enic = pmd_priv(eth_dev); 627 int ret; 628 629 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 630 return -E_RTE_SECONDARY; 631 632 ENICPMD_FUNC_TRACE(); 633 enic->promisc = 0; 634 ret = enic_add_packet_filter(enic); 635 if (ret != 0) 636 enic->promisc = 1; 637 638 return ret; 639 } 640 641 static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 642 { 643 struct enic *enic = pmd_priv(eth_dev); 644 int ret; 645 646 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 647 return -E_RTE_SECONDARY; 648 649 ENICPMD_FUNC_TRACE(); 650 enic->allmulti = 1; 651 ret = enic_add_packet_filter(enic); 652 if (ret != 0) 653 enic->allmulti = 0; 654 655 return ret; 656 } 657 658 static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 659 { 660 struct enic *enic = pmd_priv(eth_dev); 661 int ret; 662 663 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 664 return -E_RTE_SECONDARY; 665 666 ENICPMD_FUNC_TRACE(); 667 enic->allmulti = 0; 668 ret = enic_add_packet_filter(enic); 669 if (ret != 0) 670 enic->allmulti = 1; 671 672 return ret; 673 } 674 675 static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, 676 struct rte_ether_addr *mac_addr, 677 __rte_unused uint32_t index, __rte_unused uint32_t pool) 678 { 679 struct enic *enic = pmd_priv(eth_dev); 680 681 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 682 return -E_RTE_SECONDARY; 683 684 ENICPMD_FUNC_TRACE(); 685 return enic_set_mac_address(enic, mac_addr->addr_bytes); 686 } 687 688 static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) 689 { 690 struct enic *enic = pmd_priv(eth_dev); 691 692 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 693 return; 694 695 ENICPMD_FUNC_TRACE(); 696 if (enic_del_mac_address(enic, index)) 697 dev_err(enic, "del mac addr failed\n"); 698 } 699 700 static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev, 701 struct rte_ether_addr *addr) 702 { 703 struct enic *enic = pmd_priv(eth_dev); 704 int ret; 705 706 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 707 return -E_RTE_SECONDARY; 708 709 ENICPMD_FUNC_TRACE(); 710 ret = enic_del_mac_address(enic, 0); 711 if (ret) 712 return ret; 713 return enic_set_mac_address(enic, addr->addr_bytes); 714 } 715 716 static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add) 717 { 718 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 719 720 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); 721 ENICPMD_LOG(DEBUG, " %s address %s\n", 722 add ? "add" : "remove", mac_str); 723 } 724 725 static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, 726 struct rte_ether_addr *mc_addr_set, 727 uint32_t nb_mc_addr) 728 { 729 struct enic *enic = pmd_priv(eth_dev); 730 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 731 struct rte_ether_addr *addr; 732 uint32_t i, j; 733 int ret; 734 735 ENICPMD_FUNC_TRACE(); 736 737 /* Validate the given addresses first */ 738 for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) { 739 addr = &mc_addr_set[i]; 740 if (!rte_is_multicast_ether_addr(addr) || 741 rte_is_broadcast_ether_addr(addr)) { 742 rte_ether_format_addr(mac_str, 743 RTE_ETHER_ADDR_FMT_SIZE, addr); 744 ENICPMD_LOG(ERR, " invalid multicast address %s\n", 745 mac_str); 746 return -EINVAL; 747 } 748 } 749 750 /* Flush all if requested */ 751 if (nb_mc_addr == 0 || mc_addr_set == NULL) { 752 ENICPMD_LOG(DEBUG, " flush multicast addresses\n"); 753 for (i = 0; i < enic->mc_count; i++) { 754 addr = &enic->mc_addrs[i]; 755 debug_log_add_del_addr(addr, false); 756 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); 757 if (ret) 758 return ret; 759 } 760 enic->mc_count = 0; 761 return 0; 762 } 763 764 if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) { 765 ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n", 766 ENIC_MULTICAST_PERFECT_FILTERS); 767 return -ENOSPC; 768 } 769 /* 770 * devcmd is slow, so apply the difference instead of flushing and 771 * adding everything. 772 * 1. Delete addresses on the NIC but not on the host 773 */ 774 for (i = 0; i < enic->mc_count; i++) { 775 addr = &enic->mc_addrs[i]; 776 for (j = 0; j < nb_mc_addr; j++) { 777 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) 778 break; 779 } 780 if (j < nb_mc_addr) 781 continue; 782 debug_log_add_del_addr(addr, false); 783 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); 784 if (ret) 785 return ret; 786 } 787 /* 2. Add addresses on the host but not on the NIC */ 788 for (i = 0; i < nb_mc_addr; i++) { 789 addr = &mc_addr_set[i]; 790 for (j = 0; j < enic->mc_count; j++) { 791 if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j])) 792 break; 793 } 794 if (j < enic->mc_count) 795 continue; 796 debug_log_add_del_addr(addr, true); 797 ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes); 798 if (ret) 799 return ret; 800 } 801 /* Keep a copy so we can flush/apply later on.. */ 802 memcpy(enic->mc_addrs, mc_addr_set, 803 nb_mc_addr * sizeof(struct rte_ether_addr)); 804 enic->mc_count = nb_mc_addr; 805 return 0; 806 } 807 808 static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) 809 { 810 struct enic *enic = pmd_priv(eth_dev); 811 812 ENICPMD_FUNC_TRACE(); 813 return enic_set_mtu(enic, mtu); 814 } 815 816 static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev, 817 struct rte_eth_rss_reta_entry64 818 *reta_conf, 819 uint16_t reta_size) 820 { 821 struct enic *enic = pmd_priv(dev); 822 uint16_t i, idx, shift; 823 824 ENICPMD_FUNC_TRACE(); 825 if (reta_size != ENIC_RSS_RETA_SIZE) { 826 dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n", 827 reta_size, ENIC_RSS_RETA_SIZE); 828 return -EINVAL; 829 } 830 831 for (i = 0; i < reta_size; i++) { 832 idx = i / RTE_RETA_GROUP_SIZE; 833 shift = i % RTE_RETA_GROUP_SIZE; 834 if (reta_conf[idx].mask & (1ULL << shift)) 835 reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx( 836 enic->rss_cpu.cpu[i / 4].b[i % 4]); 837 } 838 839 return 0; 840 } 841 842 static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev, 843 struct rte_eth_rss_reta_entry64 844 *reta_conf, 845 uint16_t reta_size) 846 { 847 struct enic *enic = pmd_priv(dev); 848 union vnic_rss_cpu rss_cpu; 849 uint16_t i, idx, shift; 850 851 ENICPMD_FUNC_TRACE(); 852 if (reta_size != ENIC_RSS_RETA_SIZE) { 853 dev_err(enic, "reta_update: wrong reta_size. given=%u" 854 " expected=%u\n", 855 reta_size, ENIC_RSS_RETA_SIZE); 856 return -EINVAL; 857 } 858 /* 859 * Start with the current reta and modify it per reta_conf, as we 860 * need to push the entire reta even if we only modify one entry. 861 */ 862 rss_cpu = enic->rss_cpu; 863 for (i = 0; i < reta_size; i++) { 864 idx = i / RTE_RETA_GROUP_SIZE; 865 shift = i % RTE_RETA_GROUP_SIZE; 866 if (reta_conf[idx].mask & (1ULL << shift)) 867 rss_cpu.cpu[i / 4].b[i % 4] = 868 enic_rte_rq_idx_to_sop_idx( 869 reta_conf[idx].reta[shift]); 870 } 871 return enic_set_rss_reta(enic, &rss_cpu); 872 } 873 874 static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev, 875 struct rte_eth_rss_conf *rss_conf) 876 { 877 struct enic *enic = pmd_priv(dev); 878 879 ENICPMD_FUNC_TRACE(); 880 return enic_set_rss_conf(enic, rss_conf); 881 } 882 883 static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 884 struct rte_eth_rss_conf *rss_conf) 885 { 886 struct enic *enic = pmd_priv(dev); 887 888 ENICPMD_FUNC_TRACE(); 889 if (rss_conf == NULL) 890 return -EINVAL; 891 if (rss_conf->rss_key != NULL && 892 rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) { 893 dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u" 894 " expected=%u+\n", 895 rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); 896 return -EINVAL; 897 } 898 rss_conf->rss_hf = enic->rss_hf; 899 if (rss_conf->rss_key != NULL) { 900 int i; 901 for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { 902 rss_conf->rss_key[i] = 903 enic->rss_key.key[i / 10].b[i % 10]; 904 } 905 rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; 906 } 907 return 0; 908 } 909 910 static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, 911 uint16_t rx_queue_id, 912 struct rte_eth_rxq_info *qinfo) 913 { 914 struct enic *enic = pmd_priv(dev); 915 struct vnic_rq *rq_sop; 916 struct vnic_rq *rq_data; 917 struct rte_eth_rxconf *conf; 918 uint16_t sop_queue_idx; 919 uint16_t data_queue_idx; 920 921 ENICPMD_FUNC_TRACE(); 922 sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); 923 data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id); 924 rq_sop = &enic->rq[sop_queue_idx]; 925 rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ 926 qinfo->mp = rq_sop->mp; 927 qinfo->scattered_rx = rq_sop->data_queue_enable; 928 qinfo->nb_desc = rq_sop->ring.desc_count; 929 if (qinfo->scattered_rx) 930 qinfo->nb_desc += rq_data->ring.desc_count; 931 conf = &qinfo->conf; 932 memset(conf, 0, sizeof(*conf)); 933 conf->rx_free_thresh = rq_sop->rx_free_thresh; 934 conf->rx_drop_en = 1; 935 /* 936 * Except VLAN stripping (port setting), all the checksum offloads 937 * are always enabled. 938 */ 939 conf->offloads = enic->rx_offload_capa; 940 if (!enic->ig_vlan_strip_en) 941 conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 942 /* rx_thresh and other fields are not applicable for enic */ 943 } 944 945 static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, 946 uint16_t tx_queue_id, 947 struct rte_eth_txq_info *qinfo) 948 { 949 struct enic *enic = pmd_priv(dev); 950 struct vnic_wq *wq = &enic->wq[tx_queue_id]; 951 952 ENICPMD_FUNC_TRACE(); 953 qinfo->nb_desc = wq->ring.desc_count; 954 memset(&qinfo->conf, 0, sizeof(qinfo->conf)); 955 qinfo->conf.offloads = wq->offloads; 956 /* tx_thresh, and all the other fields are not applicable for enic */ 957 } 958 959 static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, 960 uint16_t rx_queue_id) 961 { 962 struct enic *enic = pmd_priv(eth_dev); 963 964 ENICPMD_FUNC_TRACE(); 965 vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); 966 return 0; 967 } 968 969 static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, 970 uint16_t rx_queue_id) 971 { 972 struct enic *enic = pmd_priv(eth_dev); 973 974 ENICPMD_FUNC_TRACE(); 975 vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); 976 return 0; 977 } 978 979 static int udp_tunnel_common_check(struct enic *enic, 980 struct rte_eth_udp_tunnel *tnl) 981 { 982 if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) 983 return -ENOTSUP; 984 if (!enic->overlay_offload) { 985 ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not " 986 "supported\n"); 987 return -ENOTSUP; 988 } 989 return 0; 990 } 991 992 static int update_vxlan_port(struct enic *enic, uint16_t port) 993 { 994 if (vnic_dev_overlay_offload_cfg(enic->vdev, 995 OVERLAY_CFG_VXLAN_PORT_UPDATE, 996 port)) { 997 ENICPMD_LOG(DEBUG, " failed to update vxlan port\n"); 998 return -EINVAL; 999 } 1000 ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port); 1001 enic->vxlan_port = port; 1002 return 0; 1003 } 1004 1005 static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, 1006 struct rte_eth_udp_tunnel *tnl) 1007 { 1008 struct enic *enic = pmd_priv(eth_dev); 1009 int ret; 1010 1011 ENICPMD_FUNC_TRACE(); 1012 ret = udp_tunnel_common_check(enic, tnl); 1013 if (ret) 1014 return ret; 1015 /* 1016 * The NIC has 1 configurable VXLAN port number. "Adding" a new port 1017 * number replaces it. 1018 */ 1019 if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) { 1020 ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n", 1021 tnl->udp_port); 1022 return -EINVAL; 1023 } 1024 return update_vxlan_port(enic, tnl->udp_port); 1025 } 1026 1027 static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev, 1028 struct rte_eth_udp_tunnel *tnl) 1029 { 1030 struct enic *enic = pmd_priv(eth_dev); 1031 int ret; 1032 1033 ENICPMD_FUNC_TRACE(); 1034 ret = udp_tunnel_common_check(enic, tnl); 1035 if (ret) 1036 return ret; 1037 /* 1038 * Clear the previously set port number and restore the 1039 * hardware default port number. Some drivers disable VXLAN 1040 * offloads when there are no configured port numbers. But 1041 * enic does not do that as VXLAN is part of overlay offload, 1042 * which is tied to inner RSS and TSO. 1043 */ 1044 if (tnl->udp_port != enic->vxlan_port) { 1045 ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n", 1046 tnl->udp_port); 1047 return -EINVAL; 1048 } 1049 return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT); 1050 } 1051 1052 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev, 1053 char *fw_version, size_t fw_size) 1054 { 1055 struct vnic_devcmd_fw_info *info; 1056 struct enic *enic; 1057 int ret; 1058 1059 ENICPMD_FUNC_TRACE(); 1060 if (fw_version == NULL || fw_size <= 0) 1061 return -EINVAL; 1062 enic = pmd_priv(eth_dev); 1063 ret = vnic_dev_fw_info(enic->vdev, &info); 1064 if (ret) 1065 return ret; 1066 snprintf(fw_version, fw_size, "%s %s", 1067 info->fw_version, info->fw_build); 1068 fw_version[fw_size - 1] = '\0'; 1069 return 0; 1070 } 1071 1072 static const struct eth_dev_ops enicpmd_eth_dev_ops = { 1073 .dev_configure = enicpmd_dev_configure, 1074 .dev_start = enicpmd_dev_start, 1075 .dev_stop = enicpmd_dev_stop, 1076 .dev_set_link_up = NULL, 1077 .dev_set_link_down = NULL, 1078 .dev_close = enicpmd_dev_close, 1079 .promiscuous_enable = enicpmd_dev_promiscuous_enable, 1080 .promiscuous_disable = enicpmd_dev_promiscuous_disable, 1081 .allmulticast_enable = enicpmd_dev_allmulticast_enable, 1082 .allmulticast_disable = enicpmd_dev_allmulticast_disable, 1083 .link_update = enicpmd_dev_link_update, 1084 .stats_get = enicpmd_dev_stats_get, 1085 .stats_reset = enicpmd_dev_stats_reset, 1086 .queue_stats_mapping_set = NULL, 1087 .dev_infos_get = enicpmd_dev_info_get, 1088 .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, 1089 .mtu_set = enicpmd_mtu_set, 1090 .vlan_filter_set = NULL, 1091 .vlan_tpid_set = NULL, 1092 .vlan_offload_set = enicpmd_vlan_offload_set, 1093 .vlan_strip_queue_set = NULL, 1094 .rx_queue_start = enicpmd_dev_rx_queue_start, 1095 .rx_queue_stop = enicpmd_dev_rx_queue_stop, 1096 .tx_queue_start = enicpmd_dev_tx_queue_start, 1097 .tx_queue_stop = enicpmd_dev_tx_queue_stop, 1098 .rx_queue_setup = enicpmd_dev_rx_queue_setup, 1099 .rx_queue_release = enicpmd_dev_rx_queue_release, 1100 .rx_queue_count = enicpmd_dev_rx_queue_count, 1101 .rx_descriptor_done = NULL, 1102 .tx_queue_setup = enicpmd_dev_tx_queue_setup, 1103 .tx_queue_release = enicpmd_dev_tx_queue_release, 1104 .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable, 1105 .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable, 1106 .rxq_info_get = enicpmd_dev_rxq_info_get, 1107 .txq_info_get = enicpmd_dev_txq_info_get, 1108 .dev_led_on = NULL, 1109 .dev_led_off = NULL, 1110 .flow_ctrl_get = NULL, 1111 .flow_ctrl_set = NULL, 1112 .priority_flow_ctrl_set = NULL, 1113 .mac_addr_add = enicpmd_add_mac_addr, 1114 .mac_addr_remove = enicpmd_remove_mac_addr, 1115 .mac_addr_set = enicpmd_set_mac_addr, 1116 .set_mc_addr_list = enicpmd_set_mc_addr_list, 1117 .filter_ctrl = enicpmd_dev_filter_ctrl, 1118 .reta_query = enicpmd_dev_rss_reta_query, 1119 .reta_update = enicpmd_dev_rss_reta_update, 1120 .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get, 1121 .rss_hash_update = enicpmd_dev_rss_hash_update, 1122 .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add, 1123 .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del, 1124 .fw_version_get = enicpmd_dev_fw_version_get, 1125 }; 1126 1127 static int enic_parse_zero_one(const char *key, 1128 const char *value, 1129 void *opaque) 1130 { 1131 struct enic *enic; 1132 bool b; 1133 1134 enic = (struct enic *)opaque; 1135 if (strcmp(value, "0") == 0) { 1136 b = false; 1137 } else if (strcmp(value, "1") == 0) { 1138 b = true; 1139 } else { 1140 dev_err(enic, "Invalid value for %s" 1141 ": expected=0|1 given=%s\n", key, value); 1142 return -EINVAL; 1143 } 1144 if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0) 1145 enic->disable_overlay = b; 1146 if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0) 1147 enic->enable_avx2_rx = b; 1148 return 0; 1149 } 1150 1151 static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key, 1152 const char *value, 1153 void *opaque) 1154 { 1155 struct enic *enic; 1156 1157 enic = (struct enic *)opaque; 1158 if (strcmp(value, "trunk") == 0) { 1159 /* Trunk mode: always tag */ 1160 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK; 1161 } else if (strcmp(value, "untag") == 0) { 1162 /* Untag default VLAN mode: untag if VLAN = default VLAN */ 1163 enic->ig_vlan_rewrite_mode = 1164 IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; 1165 } else if (strcmp(value, "priority") == 0) { 1166 /* 1167 * Priority-tag default VLAN mode: priority tag (VLAN header 1168 * with ID=0) if VLAN = default 1169 */ 1170 enic->ig_vlan_rewrite_mode = 1171 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; 1172 } else if (strcmp(value, "pass") == 0) { 1173 /* Pass through mode: do not touch tags */ 1174 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; 1175 } else { 1176 dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE 1177 ": expected=trunk|untag|priority|pass given=%s\n", 1178 value); 1179 return -EINVAL; 1180 } 1181 return 0; 1182 } 1183 1184 static int enic_check_devargs(struct rte_eth_dev *dev) 1185 { 1186 static const char *const valid_keys[] = { 1187 ENIC_DEVARG_DISABLE_OVERLAY, 1188 ENIC_DEVARG_ENABLE_AVX2_RX, 1189 ENIC_DEVARG_IG_VLAN_REWRITE, 1190 NULL}; 1191 struct enic *enic = pmd_priv(dev); 1192 struct rte_kvargs *kvlist; 1193 1194 ENICPMD_FUNC_TRACE(); 1195 1196 enic->disable_overlay = false; 1197 enic->enable_avx2_rx = false; 1198 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; 1199 if (!dev->device->devargs) 1200 return 0; 1201 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); 1202 if (!kvlist) 1203 return -EINVAL; 1204 if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY, 1205 enic_parse_zero_one, enic) < 0 || 1206 rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX, 1207 enic_parse_zero_one, enic) < 0 || 1208 rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE, 1209 enic_parse_ig_vlan_rewrite, enic) < 0) { 1210 rte_kvargs_free(kvlist); 1211 return -EINVAL; 1212 } 1213 rte_kvargs_free(kvlist); 1214 return 0; 1215 } 1216 1217 /* Initialize the driver 1218 * It returns 0 on success. 1219 */ 1220 static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) 1221 { 1222 struct rte_pci_device *pdev; 1223 struct rte_pci_addr *addr; 1224 struct enic *enic = pmd_priv(eth_dev); 1225 int err; 1226 1227 ENICPMD_FUNC_TRACE(); 1228 1229 enic->port_id = eth_dev->data->port_id; 1230 enic->rte_dev = eth_dev; 1231 eth_dev->dev_ops = &enicpmd_eth_dev_ops; 1232 eth_dev->rx_pkt_burst = &enic_recv_pkts; 1233 eth_dev->tx_pkt_burst = &enic_xmit_pkts; 1234 eth_dev->tx_pkt_prepare = &enic_prep_pkts; 1235 /* Let rte_eth_dev_close() release the port resources */ 1236 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE; 1237 1238 pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 1239 rte_eth_copy_pci_info(eth_dev, pdev); 1240 enic->pdev = pdev; 1241 addr = &pdev->addr; 1242 1243 snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", 1244 addr->domain, addr->bus, addr->devid, addr->function); 1245 1246 err = enic_check_devargs(eth_dev); 1247 if (err) 1248 return err; 1249 return enic_probe(enic); 1250 } 1251 1252 static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1253 struct rte_pci_device *pci_dev) 1254 { 1255 return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic), 1256 eth_enicpmd_dev_init); 1257 } 1258 1259 static int eth_enic_pci_remove(struct rte_pci_device *pci_dev) 1260 { 1261 return rte_eth_dev_pci_generic_remove(pci_dev, NULL); 1262 } 1263 1264 static struct rte_pci_driver rte_enic_pmd = { 1265 .id_table = pci_id_enic_map, 1266 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1267 .probe = eth_enic_pci_probe, 1268 .remove = eth_enic_pci_remove, 1269 }; 1270 1271 RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd); 1272 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map); 1273 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci"); 1274 RTE_PMD_REGISTER_PARAM_STRING(net_enic, 1275 ENIC_DEVARG_DISABLE_OVERLAY "=0|1 " 1276 ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 " 1277 ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass"); 1278