1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 4 */ 5 6 #include <stdio.h> 7 #include <stdint.h> 8 9 #include <rte_dev.h> 10 #include <rte_pci.h> 11 #include <rte_bus_pci.h> 12 #include <ethdev_driver.h> 13 #include <ethdev_pci.h> 14 #include <rte_geneve.h> 15 #include <rte_kvargs.h> 16 #include <rte_string_fns.h> 17 18 #include "vnic_intr.h" 19 #include "vnic_cq.h" 20 #include "vnic_wq.h" 21 #include "vnic_rq.h" 22 #include "vnic_enet.h" 23 #include "enic.h" 24 25 /* 26 * The set of PCI devices this driver supports 27 */ 28 #define CISCO_PCI_VENDOR_ID 0x1137 29 static const struct rte_pci_id pci_id_enic_map[] = { 30 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)}, 31 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)}, 32 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)}, 33 {.vendor_id = 0, /* sentinel */}, 34 }; 35 36 /* Supported link speeds of production VIC models */ 37 static const struct vic_speed_capa { 38 uint16_t sub_devid; 39 uint32_t capa; 40 } vic_speed_capa_map[] = { 41 { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */ 42 { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */ 43 { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */ 44 { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */ 45 { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */ 46 { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */ 47 { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */ 48 { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */ 49 { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */ 50 { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */ 51 { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */ 52 { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */ 53 { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */ 54 { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */ 55 { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | 56 ETH_LINK_SPEED_40G }, /* 1440 Mezz */ 57 { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | 58 ETH_LINK_SPEED_40G }, /* 1480 MLOM */ 59 { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */ 60 { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */ 61 { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */ 62 { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */ 63 { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */ 64 { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */ 65 { 0, 0 }, /* End marker */ 66 }; 67 68 #define ENIC_DEVARG_CQ64 "cq64" 69 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay" 70 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx" 71 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite" 72 #define ENIC_DEVARG_REPRESENTOR "representor" 73 74 RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO); 75 76 static int 77 enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev, 78 const struct rte_flow_ops **ops) 79 { 80 struct enic *enic = pmd_priv(dev); 81 82 ENICPMD_FUNC_TRACE(); 83 84 if (enic->flow_filter_mode == FILTER_FLOWMAN) 85 *ops = &enic_fm_flow_ops; 86 else 87 *ops = &enic_flow_ops; 88 return 0; 89 } 90 91 static void enicpmd_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 92 { 93 void *txq = dev->data->tx_queues[qid]; 94 95 ENICPMD_FUNC_TRACE(); 96 97 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 98 return; 99 100 enic_free_wq(txq); 101 } 102 103 static int enicpmd_dev_setup_intr(struct enic *enic) 104 { 105 int ret; 106 unsigned int index; 107 108 ENICPMD_FUNC_TRACE(); 109 110 /* Are we done with the init of all the queues? */ 111 for (index = 0; index < enic->cq_count; index++) { 112 if (!enic->cq[index].ctrl) 113 break; 114 } 115 if (enic->cq_count != index) 116 return 0; 117 for (index = 0; index < enic->wq_count; index++) { 118 if (!enic->wq[index].ctrl) 119 break; 120 } 121 if (enic->wq_count != index) 122 return 0; 123 /* check start of packet (SOP) RQs only in case scatter is disabled. */ 124 for (index = 0; index < enic->rq_count; index++) { 125 if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl) 126 break; 127 } 128 if (enic->rq_count != index) 129 return 0; 130 131 ret = enic_alloc_intr_resources(enic); 132 if (ret) { 133 dev_err(enic, "alloc intr failed\n"); 134 return ret; 135 } 136 enic_init_vnic_resources(enic); 137 138 ret = enic_setup_finish(enic); 139 if (ret) 140 dev_err(enic, "setup could not be finished\n"); 141 142 return ret; 143 } 144 145 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, 146 uint16_t queue_idx, 147 uint16_t nb_desc, 148 unsigned int socket_id, 149 const struct rte_eth_txconf *tx_conf) 150 { 151 int ret; 152 struct enic *enic = pmd_priv(eth_dev); 153 struct vnic_wq *wq; 154 155 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 156 return -E_RTE_SECONDARY; 157 158 ENICPMD_FUNC_TRACE(); 159 RTE_ASSERT(queue_idx < enic->conf_wq_count); 160 wq = &enic->wq[queue_idx]; 161 wq->offloads = tx_conf->offloads | 162 eth_dev->data->dev_conf.txmode.offloads; 163 eth_dev->data->tx_queues[queue_idx] = (void *)wq; 164 165 ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc); 166 if (ret) { 167 dev_err(enic, "error in allocating wq\n"); 168 return ret; 169 } 170 171 return enicpmd_dev_setup_intr(enic); 172 } 173 174 static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev, 175 uint16_t queue_idx) 176 { 177 struct enic *enic = pmd_priv(eth_dev); 178 179 ENICPMD_FUNC_TRACE(); 180 181 enic_start_wq(enic, queue_idx); 182 183 return 0; 184 } 185 186 static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, 187 uint16_t queue_idx) 188 { 189 int ret; 190 struct enic *enic = pmd_priv(eth_dev); 191 192 ENICPMD_FUNC_TRACE(); 193 194 ret = enic_stop_wq(enic, queue_idx); 195 if (ret) 196 dev_err(enic, "error in stopping wq %d\n", queue_idx); 197 198 return ret; 199 } 200 201 static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev, 202 uint16_t queue_idx) 203 { 204 struct enic *enic = pmd_priv(eth_dev); 205 206 ENICPMD_FUNC_TRACE(); 207 208 enic_start_rq(enic, queue_idx); 209 210 return 0; 211 } 212 213 static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, 214 uint16_t queue_idx) 215 { 216 int ret; 217 struct enic *enic = pmd_priv(eth_dev); 218 219 ENICPMD_FUNC_TRACE(); 220 221 ret = enic_stop_rq(enic, queue_idx); 222 if (ret) 223 dev_err(enic, "error in stopping rq %d\n", queue_idx); 224 225 return ret; 226 } 227 228 static void enicpmd_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 229 { 230 void *rxq = dev->data->rx_queues[qid]; 231 232 ENICPMD_FUNC_TRACE(); 233 234 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 235 return; 236 237 enic_free_rq(rxq); 238 } 239 240 static uint32_t enicpmd_dev_rx_queue_count(void *rx_queue) 241 { 242 struct enic *enic; 243 struct vnic_rq *sop_rq; 244 uint32_t queue_count = 0; 245 struct vnic_cq *cq; 246 uint32_t cq_tail; 247 uint16_t cq_idx; 248 249 sop_rq = rx_queue; 250 enic = vnic_dev_priv(sop_rq->vdev); 251 cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; 252 cq_idx = cq->to_clean; 253 254 cq_tail = ioread32(&cq->ctrl->cq_tail); 255 256 if (cq_tail < cq_idx) 257 cq_tail += cq->ring.desc_count; 258 259 queue_count = cq_tail - cq_idx; 260 261 return queue_count; 262 } 263 264 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 265 uint16_t queue_idx, 266 uint16_t nb_desc, 267 unsigned int socket_id, 268 const struct rte_eth_rxconf *rx_conf, 269 struct rte_mempool *mp) 270 { 271 int ret; 272 struct enic *enic = pmd_priv(eth_dev); 273 274 ENICPMD_FUNC_TRACE(); 275 276 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 277 return -E_RTE_SECONDARY; 278 RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count); 279 eth_dev->data->rx_queues[queue_idx] = 280 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)]; 281 282 ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc, 283 rx_conf->rx_free_thresh); 284 if (ret) { 285 dev_err(enic, "error in allocating rq\n"); 286 return ret; 287 } 288 289 return enicpmd_dev_setup_intr(enic); 290 } 291 292 static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 293 { 294 struct enic *enic = pmd_priv(eth_dev); 295 uint64_t offloads; 296 297 ENICPMD_FUNC_TRACE(); 298 299 offloads = eth_dev->data->dev_conf.rxmode.offloads; 300 if (mask & ETH_VLAN_STRIP_MASK) { 301 if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) 302 enic->ig_vlan_strip_en = 1; 303 else 304 enic->ig_vlan_strip_en = 0; 305 } 306 307 return enic_set_vlan_strip(enic); 308 } 309 310 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) 311 { 312 int ret; 313 int mask; 314 struct enic *enic = pmd_priv(eth_dev); 315 316 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 317 return -E_RTE_SECONDARY; 318 319 ENICPMD_FUNC_TRACE(); 320 ret = enic_set_vnic_res(enic); 321 if (ret) { 322 dev_err(enic, "Set vNIC resource num failed, aborting\n"); 323 return ret; 324 } 325 326 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) 327 eth_dev->data->dev_conf.rxmode.offloads |= 328 DEV_RX_OFFLOAD_RSS_HASH; 329 330 enic->mc_count = 0; 331 enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads & 332 DEV_RX_OFFLOAD_CHECKSUM); 333 /* All vlan offload masks to apply the current settings */ 334 mask = ETH_VLAN_STRIP_MASK | 335 ETH_VLAN_FILTER_MASK | 336 ETH_VLAN_EXTEND_MASK; 337 ret = enicpmd_vlan_offload_set(eth_dev, mask); 338 if (ret) { 339 dev_err(enic, "Failed to configure VLAN offloads\n"); 340 return ret; 341 } 342 /* 343 * Initialize RSS with the default reta and key. If the user key is 344 * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the 345 * default key. 346 */ 347 return enic_init_rss_nic_cfg(enic); 348 } 349 350 /* Start the device. 351 * It returns 0 on success. 352 */ 353 static int enicpmd_dev_start(struct rte_eth_dev *eth_dev) 354 { 355 struct enic *enic = pmd_priv(eth_dev); 356 357 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 358 return -E_RTE_SECONDARY; 359 360 ENICPMD_FUNC_TRACE(); 361 return enic_enable(enic); 362 } 363 364 /* 365 * Stop device: disable rx and tx functions to allow for reconfiguring. 366 */ 367 static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev) 368 { 369 struct rte_eth_link link; 370 struct enic *enic = pmd_priv(eth_dev); 371 372 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 373 return 0; 374 375 ENICPMD_FUNC_TRACE(); 376 enic_disable(enic); 377 378 memset(&link, 0, sizeof(link)); 379 rte_eth_linkstatus_set(eth_dev, &link); 380 381 return 0; 382 } 383 384 /* 385 * Stop device. 386 */ 387 static int enicpmd_dev_close(struct rte_eth_dev *eth_dev) 388 { 389 struct enic *enic = pmd_priv(eth_dev); 390 391 ENICPMD_FUNC_TRACE(); 392 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 393 return 0; 394 395 enic_remove(enic); 396 397 return 0; 398 } 399 400 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev, 401 __rte_unused int wait_to_complete) 402 { 403 ENICPMD_FUNC_TRACE(); 404 return enic_link_update(eth_dev); 405 } 406 407 static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev, 408 struct rte_eth_stats *stats) 409 { 410 struct enic *enic = pmd_priv(eth_dev); 411 412 ENICPMD_FUNC_TRACE(); 413 return enic_dev_stats_get(enic, stats); 414 } 415 416 static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev) 417 { 418 struct enic *enic = pmd_priv(eth_dev); 419 420 ENICPMD_FUNC_TRACE(); 421 return enic_dev_stats_clear(enic); 422 } 423 424 static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev) 425 { 426 const struct vic_speed_capa *m; 427 struct rte_pci_device *pdev; 428 uint16_t id; 429 430 pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 431 id = pdev->id.subsystem_device_id; 432 for (m = vic_speed_capa_map; m->sub_devid != 0; m++) { 433 if (m->sub_devid == id) 434 return m->capa; 435 } 436 /* 1300 and later models are at least 40G */ 437 if (id >= 0x0100) 438 return ETH_LINK_SPEED_40G; 439 /* VFs have subsystem id 0, check device id */ 440 if (id == 0) { 441 /* Newer VF implies at least 40G model */ 442 if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN) 443 return ETH_LINK_SPEED_40G; 444 } 445 return ETH_LINK_SPEED_10G; 446 } 447 448 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, 449 struct rte_eth_dev_info *device_info) 450 { 451 struct enic *enic = pmd_priv(eth_dev); 452 453 ENICPMD_FUNC_TRACE(); 454 /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ 455 device_info->max_rx_queues = enic->conf_rq_count / 2; 456 device_info->max_tx_queues = enic->conf_wq_count; 457 device_info->min_rx_bufsize = ENIC_MIN_MTU; 458 /* "Max" mtu is not a typo. HW receives packet sizes up to the 459 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is 460 * a hint to the driver to size receive buffers accordingly so that 461 * larger-than-vnic-mtu packets get truncated.. For DPDK, we let 462 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically 463 * ignoring vNIC mtu. 464 */ 465 device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu); 466 device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS; 467 device_info->min_mtu = ENIC_MIN_MTU; 468 device_info->max_mtu = enic->max_mtu; 469 device_info->rx_offload_capa = enic->rx_offload_capa; 470 device_info->tx_offload_capa = enic->tx_offload_capa; 471 device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa; 472 device_info->default_rxconf = (struct rte_eth_rxconf) { 473 .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH 474 }; 475 device_info->reta_size = enic->reta_size; 476 device_info->hash_key_size = enic->hash_key_size; 477 device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads; 478 device_info->rx_desc_lim = (struct rte_eth_desc_lim) { 479 .nb_max = enic->config.rq_desc_count, 480 .nb_min = ENIC_MIN_RQ_DESCS, 481 .nb_align = ENIC_ALIGN_DESCS, 482 }; 483 device_info->tx_desc_lim = (struct rte_eth_desc_lim) { 484 .nb_max = enic->config.wq_desc_count, 485 .nb_min = ENIC_MIN_WQ_DESCS, 486 .nb_align = ENIC_ALIGN_DESCS, 487 .nb_seg_max = ENIC_TX_XMIT_MAX, 488 .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC, 489 }; 490 device_info->default_rxportconf = (struct rte_eth_dev_portconf) { 491 .burst_size = ENIC_DEFAULT_RX_BURST, 492 .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max, 493 ENIC_DEFAULT_RX_RING_SIZE), 494 .nb_queues = ENIC_DEFAULT_RX_RINGS, 495 }; 496 device_info->default_txportconf = (struct rte_eth_dev_portconf) { 497 .burst_size = ENIC_DEFAULT_TX_BURST, 498 .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max, 499 ENIC_DEFAULT_TX_RING_SIZE), 500 .nb_queues = ENIC_DEFAULT_TX_RINGS, 501 }; 502 device_info->speed_capa = speed_capa_from_pci_id(eth_dev); 503 504 return 0; 505 } 506 507 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) 508 { 509 static const uint32_t ptypes[] = { 510 RTE_PTYPE_L2_ETHER, 511 RTE_PTYPE_L2_ETHER_VLAN, 512 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 513 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 514 RTE_PTYPE_L4_TCP, 515 RTE_PTYPE_L4_UDP, 516 RTE_PTYPE_L4_FRAG, 517 RTE_PTYPE_L4_NONFRAG, 518 RTE_PTYPE_UNKNOWN 519 }; 520 static const uint32_t ptypes_overlay[] = { 521 RTE_PTYPE_L2_ETHER, 522 RTE_PTYPE_L2_ETHER_VLAN, 523 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, 524 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, 525 RTE_PTYPE_L4_TCP, 526 RTE_PTYPE_L4_UDP, 527 RTE_PTYPE_L4_FRAG, 528 RTE_PTYPE_L4_NONFRAG, 529 RTE_PTYPE_TUNNEL_GRENAT, 530 RTE_PTYPE_INNER_L2_ETHER, 531 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN, 532 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN, 533 RTE_PTYPE_INNER_L4_TCP, 534 RTE_PTYPE_INNER_L4_UDP, 535 RTE_PTYPE_INNER_L4_FRAG, 536 RTE_PTYPE_INNER_L4_NONFRAG, 537 RTE_PTYPE_UNKNOWN 538 }; 539 540 if (dev->rx_pkt_burst != enic_dummy_recv_pkts && 541 dev->rx_pkt_burst != NULL) { 542 struct enic *enic = pmd_priv(dev); 543 if (enic->overlay_offload) 544 return ptypes_overlay; 545 else 546 return ptypes; 547 } 548 return NULL; 549 } 550 551 static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) 552 { 553 struct enic *enic = pmd_priv(eth_dev); 554 int ret; 555 556 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 557 return -E_RTE_SECONDARY; 558 559 ENICPMD_FUNC_TRACE(); 560 561 enic->promisc = 1; 562 ret = enic_add_packet_filter(enic); 563 if (ret != 0) 564 enic->promisc = 0; 565 566 return ret; 567 } 568 569 static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) 570 { 571 struct enic *enic = pmd_priv(eth_dev); 572 int ret; 573 574 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 575 return -E_RTE_SECONDARY; 576 577 ENICPMD_FUNC_TRACE(); 578 enic->promisc = 0; 579 ret = enic_add_packet_filter(enic); 580 if (ret != 0) 581 enic->promisc = 1; 582 583 return ret; 584 } 585 586 static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev) 587 { 588 struct enic *enic = pmd_priv(eth_dev); 589 int ret; 590 591 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 592 return -E_RTE_SECONDARY; 593 594 ENICPMD_FUNC_TRACE(); 595 enic->allmulti = 1; 596 ret = enic_add_packet_filter(enic); 597 if (ret != 0) 598 enic->allmulti = 0; 599 600 return ret; 601 } 602 603 static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) 604 { 605 struct enic *enic = pmd_priv(eth_dev); 606 int ret; 607 608 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 609 return -E_RTE_SECONDARY; 610 611 ENICPMD_FUNC_TRACE(); 612 enic->allmulti = 0; 613 ret = enic_add_packet_filter(enic); 614 if (ret != 0) 615 enic->allmulti = 1; 616 617 return ret; 618 } 619 620 static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev, 621 struct rte_ether_addr *mac_addr, 622 __rte_unused uint32_t index, __rte_unused uint32_t pool) 623 { 624 struct enic *enic = pmd_priv(eth_dev); 625 626 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 627 return -E_RTE_SECONDARY; 628 629 ENICPMD_FUNC_TRACE(); 630 return enic_set_mac_address(enic, mac_addr->addr_bytes); 631 } 632 633 static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index) 634 { 635 struct enic *enic = pmd_priv(eth_dev); 636 637 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 638 return; 639 640 ENICPMD_FUNC_TRACE(); 641 if (enic_del_mac_address(enic, index)) 642 dev_err(enic, "del mac addr failed\n"); 643 } 644 645 static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev, 646 struct rte_ether_addr *addr) 647 { 648 struct enic *enic = pmd_priv(eth_dev); 649 int ret; 650 651 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 652 return -E_RTE_SECONDARY; 653 654 ENICPMD_FUNC_TRACE(); 655 ret = enic_del_mac_address(enic, 0); 656 if (ret) 657 return ret; 658 return enic_set_mac_address(enic, addr->addr_bytes); 659 } 660 661 static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add) 662 { 663 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 664 665 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr); 666 ENICPMD_LOG(DEBUG, " %s address %s\n", 667 add ? "add" : "remove", mac_str); 668 } 669 670 static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev, 671 struct rte_ether_addr *mc_addr_set, 672 uint32_t nb_mc_addr) 673 { 674 struct enic *enic = pmd_priv(eth_dev); 675 char mac_str[RTE_ETHER_ADDR_FMT_SIZE]; 676 struct rte_ether_addr *addr; 677 uint32_t i, j; 678 int ret; 679 680 ENICPMD_FUNC_TRACE(); 681 682 /* Validate the given addresses first */ 683 for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) { 684 addr = &mc_addr_set[i]; 685 if (!rte_is_multicast_ether_addr(addr) || 686 rte_is_broadcast_ether_addr(addr)) { 687 rte_ether_format_addr(mac_str, 688 RTE_ETHER_ADDR_FMT_SIZE, addr); 689 ENICPMD_LOG(ERR, " invalid multicast address %s\n", 690 mac_str); 691 return -EINVAL; 692 } 693 } 694 695 /* Flush all if requested */ 696 if (nb_mc_addr == 0 || mc_addr_set == NULL) { 697 ENICPMD_LOG(DEBUG, " flush multicast addresses\n"); 698 for (i = 0; i < enic->mc_count; i++) { 699 addr = &enic->mc_addrs[i]; 700 debug_log_add_del_addr(addr, false); 701 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); 702 if (ret) 703 return ret; 704 } 705 enic->mc_count = 0; 706 return 0; 707 } 708 709 if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) { 710 ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n", 711 ENIC_MULTICAST_PERFECT_FILTERS); 712 return -ENOSPC; 713 } 714 /* 715 * devcmd is slow, so apply the difference instead of flushing and 716 * adding everything. 717 * 1. Delete addresses on the NIC but not on the host 718 */ 719 for (i = 0; i < enic->mc_count; i++) { 720 addr = &enic->mc_addrs[i]; 721 for (j = 0; j < nb_mc_addr; j++) { 722 if (rte_is_same_ether_addr(addr, &mc_addr_set[j])) 723 break; 724 } 725 if (j < nb_mc_addr) 726 continue; 727 debug_log_add_del_addr(addr, false); 728 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes); 729 if (ret) 730 return ret; 731 } 732 /* 2. Add addresses on the host but not on the NIC */ 733 for (i = 0; i < nb_mc_addr; i++) { 734 addr = &mc_addr_set[i]; 735 for (j = 0; j < enic->mc_count; j++) { 736 if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j])) 737 break; 738 } 739 if (j < enic->mc_count) 740 continue; 741 debug_log_add_del_addr(addr, true); 742 ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes); 743 if (ret) 744 return ret; 745 } 746 /* Keep a copy so we can flush/apply later on.. */ 747 memcpy(enic->mc_addrs, mc_addr_set, 748 nb_mc_addr * sizeof(struct rte_ether_addr)); 749 enic->mc_count = nb_mc_addr; 750 return 0; 751 } 752 753 static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) 754 { 755 struct enic *enic = pmd_priv(eth_dev); 756 757 ENICPMD_FUNC_TRACE(); 758 return enic_set_mtu(enic, mtu); 759 } 760 761 static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev, 762 struct rte_eth_rss_reta_entry64 763 *reta_conf, 764 uint16_t reta_size) 765 { 766 struct enic *enic = pmd_priv(dev); 767 uint16_t i, idx, shift; 768 769 ENICPMD_FUNC_TRACE(); 770 if (reta_size != ENIC_RSS_RETA_SIZE) { 771 dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n", 772 reta_size, ENIC_RSS_RETA_SIZE); 773 return -EINVAL; 774 } 775 776 for (i = 0; i < reta_size; i++) { 777 idx = i / RTE_RETA_GROUP_SIZE; 778 shift = i % RTE_RETA_GROUP_SIZE; 779 if (reta_conf[idx].mask & (1ULL << shift)) 780 reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx( 781 enic->rss_cpu.cpu[i / 4].b[i % 4]); 782 } 783 784 return 0; 785 } 786 787 static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev, 788 struct rte_eth_rss_reta_entry64 789 *reta_conf, 790 uint16_t reta_size) 791 { 792 struct enic *enic = pmd_priv(dev); 793 union vnic_rss_cpu rss_cpu; 794 uint16_t i, idx, shift; 795 796 ENICPMD_FUNC_TRACE(); 797 if (reta_size != ENIC_RSS_RETA_SIZE) { 798 dev_err(enic, "reta_update: wrong reta_size. given=%u" 799 " expected=%u\n", 800 reta_size, ENIC_RSS_RETA_SIZE); 801 return -EINVAL; 802 } 803 /* 804 * Start with the current reta and modify it per reta_conf, as we 805 * need to push the entire reta even if we only modify one entry. 806 */ 807 rss_cpu = enic->rss_cpu; 808 for (i = 0; i < reta_size; i++) { 809 idx = i / RTE_RETA_GROUP_SIZE; 810 shift = i % RTE_RETA_GROUP_SIZE; 811 if (reta_conf[idx].mask & (1ULL << shift)) 812 rss_cpu.cpu[i / 4].b[i % 4] = 813 enic_rte_rq_idx_to_sop_idx( 814 reta_conf[idx].reta[shift]); 815 } 816 return enic_set_rss_reta(enic, &rss_cpu); 817 } 818 819 static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev, 820 struct rte_eth_rss_conf *rss_conf) 821 { 822 struct enic *enic = pmd_priv(dev); 823 824 ENICPMD_FUNC_TRACE(); 825 return enic_set_rss_conf(enic, rss_conf); 826 } 827 828 static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev, 829 struct rte_eth_rss_conf *rss_conf) 830 { 831 struct enic *enic = pmd_priv(dev); 832 833 ENICPMD_FUNC_TRACE(); 834 if (rss_conf == NULL) 835 return -EINVAL; 836 if (rss_conf->rss_key != NULL && 837 rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) { 838 dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u" 839 " expected=%u+\n", 840 rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE); 841 return -EINVAL; 842 } 843 rss_conf->rss_hf = enic->rss_hf; 844 if (rss_conf->rss_key != NULL) { 845 int i; 846 for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) { 847 rss_conf->rss_key[i] = 848 enic->rss_key.key[i / 10].b[i % 10]; 849 } 850 rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE; 851 } 852 return 0; 853 } 854 855 static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev, 856 uint16_t rx_queue_id, 857 struct rte_eth_rxq_info *qinfo) 858 { 859 struct enic *enic = pmd_priv(dev); 860 struct vnic_rq *rq_sop; 861 struct vnic_rq *rq_data; 862 struct rte_eth_rxconf *conf; 863 uint16_t sop_queue_idx; 864 uint16_t data_queue_idx; 865 866 ENICPMD_FUNC_TRACE(); 867 sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id); 868 data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic); 869 rq_sop = &enic->rq[sop_queue_idx]; 870 rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */ 871 qinfo->mp = rq_sop->mp; 872 qinfo->scattered_rx = rq_sop->data_queue_enable; 873 qinfo->nb_desc = rq_sop->ring.desc_count; 874 if (qinfo->scattered_rx) 875 qinfo->nb_desc += rq_data->ring.desc_count; 876 conf = &qinfo->conf; 877 memset(conf, 0, sizeof(*conf)); 878 conf->rx_free_thresh = rq_sop->rx_free_thresh; 879 conf->rx_drop_en = 1; 880 /* 881 * Except VLAN stripping (port setting), all the checksum offloads 882 * are always enabled. 883 */ 884 conf->offloads = enic->rx_offload_capa; 885 if (!enic->ig_vlan_strip_en) 886 conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; 887 /* rx_thresh and other fields are not applicable for enic */ 888 } 889 890 static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev, 891 uint16_t tx_queue_id, 892 struct rte_eth_txq_info *qinfo) 893 { 894 struct enic *enic = pmd_priv(dev); 895 struct vnic_wq *wq = &enic->wq[tx_queue_id]; 896 897 ENICPMD_FUNC_TRACE(); 898 qinfo->nb_desc = wq->ring.desc_count; 899 memset(&qinfo->conf, 0, sizeof(qinfo->conf)); 900 qinfo->conf.offloads = wq->offloads; 901 /* tx_thresh, and all the other fields are not applicable for enic */ 902 } 903 904 static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev, 905 __rte_unused uint16_t queue_id, 906 struct rte_eth_burst_mode *mode) 907 { 908 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst; 909 struct enic *enic = pmd_priv(dev); 910 const char *info_str = NULL; 911 int ret = -EINVAL; 912 913 ENICPMD_FUNC_TRACE(); 914 if (enic->use_noscatter_vec_rx_handler) 915 info_str = "Vector AVX2 No Scatter"; 916 else if (pkt_burst == enic_noscatter_recv_pkts) 917 info_str = "Scalar No Scatter"; 918 else if (pkt_burst == enic_recv_pkts) 919 info_str = "Scalar"; 920 else if (pkt_burst == enic_recv_pkts_64) 921 info_str = "Scalar 64B Completion"; 922 if (info_str) { 923 strlcpy(mode->info, info_str, sizeof(mode->info)); 924 ret = 0; 925 } 926 return ret; 927 } 928 929 static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev, 930 __rte_unused uint16_t queue_id, 931 struct rte_eth_burst_mode *mode) 932 { 933 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst; 934 const char *info_str = NULL; 935 int ret = -EINVAL; 936 937 ENICPMD_FUNC_TRACE(); 938 if (pkt_burst == enic_simple_xmit_pkts) 939 info_str = "Scalar Simplified"; 940 else if (pkt_burst == enic_xmit_pkts) 941 info_str = "Scalar"; 942 if (info_str) { 943 strlcpy(mode->info, info_str, sizeof(mode->info)); 944 ret = 0; 945 } 946 return ret; 947 } 948 949 static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, 950 uint16_t rx_queue_id) 951 { 952 struct enic *enic = pmd_priv(eth_dev); 953 954 ENICPMD_FUNC_TRACE(); 955 vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); 956 return 0; 957 } 958 959 static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, 960 uint16_t rx_queue_id) 961 { 962 struct enic *enic = pmd_priv(eth_dev); 963 964 ENICPMD_FUNC_TRACE(); 965 vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]); 966 return 0; 967 } 968 969 static int udp_tunnel_common_check(struct enic *enic, 970 struct rte_eth_udp_tunnel *tnl) 971 { 972 if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN && 973 tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE) 974 return -ENOTSUP; 975 if (!enic->overlay_offload) { 976 ENICPMD_LOG(DEBUG, " overlay offload is not supported\n"); 977 return -ENOTSUP; 978 } 979 return 0; 980 } 981 982 static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan) 983 { 984 uint8_t cfg; 985 986 cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE : 987 OVERLAY_CFG_GENEVE_PORT_UPDATE; 988 if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) { 989 ENICPMD_LOG(DEBUG, " failed to update tunnel port\n"); 990 return -EINVAL; 991 } 992 ENICPMD_LOG(DEBUG, " updated %s port to %u\n", 993 vxlan ? "vxlan" : "geneve", port); 994 if (vxlan) 995 enic->vxlan_port = port; 996 else 997 enic->geneve_port = port; 998 return 0; 999 } 1000 1001 static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev, 1002 struct rte_eth_udp_tunnel *tnl) 1003 { 1004 struct enic *enic = pmd_priv(eth_dev); 1005 uint16_t port; 1006 bool vxlan; 1007 int ret; 1008 1009 ENICPMD_FUNC_TRACE(); 1010 ret = udp_tunnel_common_check(enic, tnl); 1011 if (ret) 1012 return ret; 1013 vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN); 1014 if (vxlan) 1015 port = enic->vxlan_port; 1016 else 1017 port = enic->geneve_port; 1018 /* 1019 * The NIC has 1 configurable port number per tunnel type. 1020 * "Adding" a new port number replaces it. 1021 */ 1022 if (tnl->udp_port == port || tnl->udp_port == 0) { 1023 ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n", 1024 tnl->udp_port); 1025 return -EINVAL; 1026 } 1027 return update_tunnel_port(enic, tnl->udp_port, vxlan); 1028 } 1029 1030 static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev, 1031 struct rte_eth_udp_tunnel *tnl) 1032 { 1033 struct enic *enic = pmd_priv(eth_dev); 1034 uint16_t port; 1035 bool vxlan; 1036 int ret; 1037 1038 ENICPMD_FUNC_TRACE(); 1039 ret = udp_tunnel_common_check(enic, tnl); 1040 if (ret) 1041 return ret; 1042 vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN); 1043 if (vxlan) 1044 port = enic->vxlan_port; 1045 else 1046 port = enic->geneve_port; 1047 /* 1048 * Clear the previously set port number and restore the 1049 * hardware default port number. Some drivers disable VXLAN 1050 * offloads when there are no configured port numbers. But 1051 * enic does not do that as VXLAN is part of overlay offload, 1052 * which is tied to inner RSS and TSO. 1053 */ 1054 if (tnl->udp_port != port) { 1055 ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n", 1056 tnl->udp_port); 1057 return -EINVAL; 1058 } 1059 port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT; 1060 return update_tunnel_port(enic, port, vxlan); 1061 } 1062 1063 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev, 1064 char *fw_version, size_t fw_size) 1065 { 1066 struct vnic_devcmd_fw_info *info; 1067 struct enic *enic; 1068 int ret; 1069 1070 ENICPMD_FUNC_TRACE(); 1071 1072 enic = pmd_priv(eth_dev); 1073 ret = vnic_dev_fw_info(enic->vdev, &info); 1074 if (ret) 1075 return ret; 1076 ret = snprintf(fw_version, fw_size, "%s %s", 1077 info->fw_version, info->fw_build); 1078 if (ret < 0) 1079 return -EINVAL; 1080 1081 ret += 1; /* add the size of '\0' */ 1082 if (fw_size < (size_t)ret) 1083 return ret; 1084 else 1085 return 0; 1086 } 1087 1088 static const struct eth_dev_ops enicpmd_eth_dev_ops = { 1089 .dev_configure = enicpmd_dev_configure, 1090 .dev_start = enicpmd_dev_start, 1091 .dev_stop = enicpmd_dev_stop, 1092 .dev_set_link_up = NULL, 1093 .dev_set_link_down = NULL, 1094 .dev_close = enicpmd_dev_close, 1095 .promiscuous_enable = enicpmd_dev_promiscuous_enable, 1096 .promiscuous_disable = enicpmd_dev_promiscuous_disable, 1097 .allmulticast_enable = enicpmd_dev_allmulticast_enable, 1098 .allmulticast_disable = enicpmd_dev_allmulticast_disable, 1099 .link_update = enicpmd_dev_link_update, 1100 .stats_get = enicpmd_dev_stats_get, 1101 .stats_reset = enicpmd_dev_stats_reset, 1102 .queue_stats_mapping_set = NULL, 1103 .dev_infos_get = enicpmd_dev_info_get, 1104 .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, 1105 .mtu_set = enicpmd_mtu_set, 1106 .vlan_filter_set = NULL, 1107 .vlan_tpid_set = NULL, 1108 .vlan_offload_set = enicpmd_vlan_offload_set, 1109 .vlan_strip_queue_set = NULL, 1110 .rx_queue_start = enicpmd_dev_rx_queue_start, 1111 .rx_queue_stop = enicpmd_dev_rx_queue_stop, 1112 .tx_queue_start = enicpmd_dev_tx_queue_start, 1113 .tx_queue_stop = enicpmd_dev_tx_queue_stop, 1114 .rx_queue_setup = enicpmd_dev_rx_queue_setup, 1115 .rx_queue_release = enicpmd_dev_rx_queue_release, 1116 .tx_queue_setup = enicpmd_dev_tx_queue_setup, 1117 .tx_queue_release = enicpmd_dev_tx_queue_release, 1118 .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable, 1119 .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable, 1120 .rxq_info_get = enicpmd_dev_rxq_info_get, 1121 .txq_info_get = enicpmd_dev_txq_info_get, 1122 .rx_burst_mode_get = enicpmd_dev_rx_burst_mode_get, 1123 .tx_burst_mode_get = enicpmd_dev_tx_burst_mode_get, 1124 .dev_led_on = NULL, 1125 .dev_led_off = NULL, 1126 .flow_ctrl_get = NULL, 1127 .flow_ctrl_set = NULL, 1128 .priority_flow_ctrl_set = NULL, 1129 .mac_addr_add = enicpmd_add_mac_addr, 1130 .mac_addr_remove = enicpmd_remove_mac_addr, 1131 .mac_addr_set = enicpmd_set_mac_addr, 1132 .set_mc_addr_list = enicpmd_set_mc_addr_list, 1133 .flow_ops_get = enicpmd_dev_flow_ops_get, 1134 .reta_query = enicpmd_dev_rss_reta_query, 1135 .reta_update = enicpmd_dev_rss_reta_update, 1136 .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get, 1137 .rss_hash_update = enicpmd_dev_rss_hash_update, 1138 .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add, 1139 .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del, 1140 .fw_version_get = enicpmd_dev_fw_version_get, 1141 }; 1142 1143 static int enic_parse_zero_one(const char *key, 1144 const char *value, 1145 void *opaque) 1146 { 1147 struct enic *enic; 1148 bool b; 1149 1150 enic = (struct enic *)opaque; 1151 if (strcmp(value, "0") == 0) { 1152 b = false; 1153 } else if (strcmp(value, "1") == 0) { 1154 b = true; 1155 } else { 1156 dev_err(enic, "Invalid value for %s" 1157 ": expected=0|1 given=%s\n", key, value); 1158 return -EINVAL; 1159 } 1160 if (strcmp(key, ENIC_DEVARG_CQ64) == 0) 1161 enic->cq64_request = b; 1162 if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0) 1163 enic->disable_overlay = b; 1164 if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0) 1165 enic->enable_avx2_rx = b; 1166 return 0; 1167 } 1168 1169 static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key, 1170 const char *value, 1171 void *opaque) 1172 { 1173 struct enic *enic; 1174 1175 enic = (struct enic *)opaque; 1176 if (strcmp(value, "trunk") == 0) { 1177 /* Trunk mode: always tag */ 1178 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK; 1179 } else if (strcmp(value, "untag") == 0) { 1180 /* Untag default VLAN mode: untag if VLAN = default VLAN */ 1181 enic->ig_vlan_rewrite_mode = 1182 IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN; 1183 } else if (strcmp(value, "priority") == 0) { 1184 /* 1185 * Priority-tag default VLAN mode: priority tag (VLAN header 1186 * with ID=0) if VLAN = default 1187 */ 1188 enic->ig_vlan_rewrite_mode = 1189 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN; 1190 } else if (strcmp(value, "pass") == 0) { 1191 /* Pass through mode: do not touch tags */ 1192 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; 1193 } else { 1194 dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE 1195 ": expected=trunk|untag|priority|pass given=%s\n", 1196 value); 1197 return -EINVAL; 1198 } 1199 return 0; 1200 } 1201 1202 static int enic_check_devargs(struct rte_eth_dev *dev) 1203 { 1204 static const char *const valid_keys[] = { 1205 ENIC_DEVARG_CQ64, 1206 ENIC_DEVARG_DISABLE_OVERLAY, 1207 ENIC_DEVARG_ENABLE_AVX2_RX, 1208 ENIC_DEVARG_IG_VLAN_REWRITE, 1209 ENIC_DEVARG_REPRESENTOR, 1210 NULL}; 1211 struct enic *enic = pmd_priv(dev); 1212 struct rte_kvargs *kvlist; 1213 1214 ENICPMD_FUNC_TRACE(); 1215 1216 enic->cq64_request = true; /* Use 64B entry if available */ 1217 enic->disable_overlay = false; 1218 enic->enable_avx2_rx = false; 1219 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU; 1220 if (!dev->device->devargs) 1221 return 0; 1222 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); 1223 if (!kvlist) 1224 return -EINVAL; 1225 if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64, 1226 enic_parse_zero_one, enic) < 0 || 1227 rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY, 1228 enic_parse_zero_one, enic) < 0 || 1229 rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX, 1230 enic_parse_zero_one, enic) < 0 || 1231 rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE, 1232 enic_parse_ig_vlan_rewrite, enic) < 0) { 1233 rte_kvargs_free(kvlist); 1234 return -EINVAL; 1235 } 1236 rte_kvargs_free(kvlist); 1237 return 0; 1238 } 1239 1240 /* Initialize the driver for PF */ 1241 static int eth_enic_dev_init(struct rte_eth_dev *eth_dev, 1242 void *init_params __rte_unused) 1243 { 1244 struct rte_pci_device *pdev; 1245 struct rte_pci_addr *addr; 1246 struct enic *enic = pmd_priv(eth_dev); 1247 int err; 1248 1249 ENICPMD_FUNC_TRACE(); 1250 eth_dev->dev_ops = &enicpmd_eth_dev_ops; 1251 eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count; 1252 eth_dev->rx_pkt_burst = &enic_recv_pkts; 1253 eth_dev->tx_pkt_burst = &enic_xmit_pkts; 1254 eth_dev->tx_pkt_prepare = &enic_prep_pkts; 1255 if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1256 enic_pick_tx_handler(eth_dev); 1257 enic_pick_rx_handler(eth_dev); 1258 return 0; 1259 } 1260 /* Only the primary sets up adapter and other data in shared memory */ 1261 enic->port_id = eth_dev->data->port_id; 1262 enic->rte_dev = eth_dev; 1263 enic->dev_data = eth_dev->data; 1264 1265 pdev = RTE_ETH_DEV_TO_PCI(eth_dev); 1266 rte_eth_copy_pci_info(eth_dev, pdev); 1267 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; 1268 enic->pdev = pdev; 1269 addr = &pdev->addr; 1270 1271 snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x", 1272 addr->domain, addr->bus, addr->devid, addr->function); 1273 1274 err = enic_check_devargs(eth_dev); 1275 if (err) 1276 return err; 1277 err = enic_probe(enic); 1278 if (!err && enic->fm) { 1279 err = enic_fm_allocate_switch_domain(enic); 1280 if (err) 1281 ENICPMD_LOG(ERR, "failed to allocate switch domain id"); 1282 } 1283 return err; 1284 } 1285 1286 static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev) 1287 { 1288 struct enic *enic = pmd_priv(eth_dev); 1289 int err; 1290 1291 ENICPMD_FUNC_TRACE(); 1292 eth_dev->device = NULL; 1293 eth_dev->intr_handle = NULL; 1294 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 1295 return 0; 1296 err = rte_eth_switch_domain_free(enic->switch_domain_id); 1297 if (err) 1298 ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err); 1299 return 0; 1300 } 1301 1302 static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 1303 struct rte_pci_device *pci_dev) 1304 { 1305 char name[RTE_ETH_NAME_MAX_LEN]; 1306 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 }; 1307 struct rte_eth_dev *pf_ethdev; 1308 struct enic *pf_enic; 1309 int i, retval; 1310 1311 ENICPMD_FUNC_TRACE(); 1312 if (pci_dev->device.devargs) { 1313 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args, 1314 ð_da); 1315 if (retval) 1316 return retval; 1317 } 1318 if (eth_da.nb_representor_ports > 0 && 1319 eth_da.type != RTE_ETH_REPRESENTOR_VF) { 1320 ENICPMD_LOG(ERR, "unsupported representor type: %s\n", 1321 pci_dev->device.devargs->args); 1322 return -ENOTSUP; 1323 } 1324 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name, 1325 sizeof(struct enic), 1326 eth_dev_pci_specific_init, pci_dev, 1327 eth_enic_dev_init, NULL); 1328 if (retval || eth_da.nb_representor_ports < 1) 1329 return retval; 1330 1331 /* Probe VF representor */ 1332 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1333 if (pf_ethdev == NULL) 1334 return -ENODEV; 1335 /* Representors require flowman */ 1336 pf_enic = pmd_priv(pf_ethdev); 1337 if (pf_enic->fm == NULL) { 1338 ENICPMD_LOG(ERR, "VF representors require flowman"); 1339 return -ENOTSUP; 1340 } 1341 /* 1342 * For now representors imply switchdev, as firmware does not support 1343 * legacy mode SR-IOV 1344 */ 1345 pf_enic->switchdev_mode = 1; 1346 /* Calculate max VF ID before initializing representor*/ 1347 pf_enic->max_vf_id = 0; 1348 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1349 pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id, 1350 eth_da.representor_ports[i]); 1351 } 1352 for (i = 0; i < eth_da.nb_representor_ports; i++) { 1353 struct enic_vf_representor representor; 1354 1355 representor.vf_id = eth_da.representor_ports[i]; 1356 representor.switch_domain_id = 1357 pmd_priv(pf_ethdev)->switch_domain_id; 1358 representor.pf = pmd_priv(pf_ethdev); 1359 snprintf(name, sizeof(name), "net_%s_representor_%d", 1360 pci_dev->device.name, eth_da.representor_ports[i]); 1361 retval = rte_eth_dev_create(&pci_dev->device, name, 1362 sizeof(struct enic_vf_representor), NULL, NULL, 1363 enic_vf_representor_init, &representor); 1364 if (retval) { 1365 ENICPMD_LOG(ERR, "failed to create enic vf representor %s", 1366 name); 1367 return retval; 1368 } 1369 } 1370 return 0; 1371 } 1372 1373 static int eth_enic_pci_remove(struct rte_pci_device *pci_dev) 1374 { 1375 struct rte_eth_dev *ethdev; 1376 1377 ENICPMD_FUNC_TRACE(); 1378 ethdev = rte_eth_dev_allocated(pci_dev->device.name); 1379 if (!ethdev) 1380 return -ENODEV; 1381 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR) 1382 return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit); 1383 else 1384 return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit); 1385 } 1386 1387 static struct rte_pci_driver rte_enic_pmd = { 1388 .id_table = pci_id_enic_map, 1389 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, 1390 .probe = eth_enic_pci_probe, 1391 .remove = eth_enic_pci_remove, 1392 }; 1393 1394 int dev_is_enic(struct rte_eth_dev *dev) 1395 { 1396 return dev->device->driver == &rte_enic_pmd.driver; 1397 } 1398 1399 RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd); 1400 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map); 1401 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci"); 1402 RTE_PMD_REGISTER_PARAM_STRING(net_enic, 1403 ENIC_DEVARG_CQ64 "=0|1" 1404 ENIC_DEVARG_DISABLE_OVERLAY "=0|1 " 1405 ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 " 1406 ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass"); 1407