/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        {.vendor_id = 0, /* sentinel */},
};

#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"

RTE_INIT(enicpmd_init_log)
{
        enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
        if (enicpmd_logtype_init >= 0)
                rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
        enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
        if (enicpmd_logtype_flow >= 0)
                rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
                enum rte_filter_op filter_op, void *arg)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret = 0;

        ENICPMD_FUNC_TRACE();
        if (filter_op == RTE_ETH_FILTER_NOP)
                return 0;

        if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
                return -EINVAL;

        switch (filter_op) {
        case RTE_ETH_FILTER_ADD:
        case RTE_ETH_FILTER_UPDATE:
                ret = enic_fdir_add_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_DELETE:
                ret = enic_fdir_del_fltr(enic,
                        (struct rte_eth_fdir_filter *)arg);
                break;

        case RTE_ETH_FILTER_STATS:
                enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
                break;

        case RTE_ETH_FILTER_FLUSH:
                dev_warning(enic, "unsupported operation %u", filter_op);
                ret = -ENOTSUP;
                break;
        case RTE_ETH_FILTER_INFO:
                enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
                break;
        default:
                dev_err(enic, "unknown operation %u", filter_op);
                ret = -EINVAL;
                break;
        }
        return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
                enum rte_filter_type filter_type,
                enum rte_filter_op filter_op,
                void *arg)
{
        int ret = 0;

        ENICPMD_FUNC_TRACE();

        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &enic_flow_ops;
                break;
        case RTE_ETH_FILTER_FDIR:
                ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
                break;
        default:
                dev_warning(enic, "Filter type (%d) not supported",
                        filter_type);
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        enic_free_wq(txq);
}

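/*
 * Deferred interrupt/resource setup: this helper is invoked at the end of
 * every Rx/Tx queue setup call below. It returns 0 without doing anything
 * until every CQ, WQ and SOP RQ has been allocated; only once the last
 * queue has been configured does it allocate interrupt resources, program
 * the vNIC resources, and finish the setup.
 */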
static int enicpmd_dev_setup_intr(struct enic *enic)
{
        int ret;
        unsigned int index;

        ENICPMD_FUNC_TRACE();

        /* Are we done with the init of all the queues? */
        for (index = 0; index < enic->cq_count; index++) {
                if (!enic->cq[index].ctrl)
                        break;
        }
        if (enic->cq_count != index)
                return 0;
        for (index = 0; index < enic->wq_count; index++) {
                if (!enic->wq[index].ctrl)
                        break;
        }
        if (enic->wq_count != index)
                return 0;
        /* check start of packet (SOP) RQs only in case scatter is disabled. */
        for (index = 0; index < enic->rq_count; index++) {
                if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
                        break;
        }
        if (enic->rq_count != index)
                return 0;

        ret = enic_alloc_intr_resources(enic);
        if (ret) {
                dev_err(enic, "alloc intr failed\n");
                return ret;
        }
        enic_init_vnic_resources(enic);

        ret = enic_setup_finish(enic);
        if (ret)
                dev_err(enic, "setup could not be finished\n");

        return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_txconf *tx_conf)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);
        struct vnic_wq *wq;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        RTE_ASSERT(queue_idx < enic->conf_wq_count);
        wq = &enic->wq[queue_idx];
        wq->offloads = tx_conf->offloads |
                eth_dev->data->dev_conf.txmode.offloads;
        eth_dev->data->tx_queues[queue_idx] = (void *)wq;

        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
        if (ret) {
                dev_err(enic, "error in allocating wq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_wq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_wq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping wq %d\n", queue_idx);

        return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        enic_start_rq(enic, queue_idx);

        return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        ret = enic_stop_rq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping rq %d\n", queue_idx);

        return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        enic_free_rq(rxq);
}

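/*
 * Rx queue count is derived from the completion queue that backs the SOP RQ:
 * the number of descriptors the NIC has completed but the driver has not yet
 * cleaned is the distance from the software index (cq->to_clean) to the
 * hardware tail (cq_tail), modulo the ring size. For example (hypothetical
 * sizes), with desc_count = 64, to_clean = 60 and cq_tail = 4, the count is
 * 4 + 64 - 60 = 8.
 */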
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
        uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(dev);
        uint32_t queue_count = 0;
        struct vnic_cq *cq;
        uint32_t cq_tail;
        uint16_t cq_idx;
        int rq_num;

        rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
        cq = &enic->cq[enic_cq_rq(enic, rq_num)];
        cq_idx = cq->to_clean;

        cq_tail = ioread32(&cq->ctrl->cq_tail);

        if (cq_tail < cq_idx)
                cq_tail += cq->ring.desc_count;

        queue_count = cq_tail - cq_idx;

        return queue_count;
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
{
        int ret;
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
        RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
        eth_dev->data->rx_queues[queue_idx] =
                (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

        ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
                rx_conf->rx_free_thresh);
        if (ret) {
                dev_err(enic, "error in allocating rq\n");
                return ret;
        }

        return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
        struct enic *enic = pmd_priv(eth_dev);
        uint64_t offloads;

        ENICPMD_FUNC_TRACE();

        offloads = eth_dev->data->dev_conf.rxmode.offloads;
        if (mask & ETH_VLAN_STRIP_MASK) {
                if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
        }

        if ((mask & ETH_VLAN_FILTER_MASK) &&
            (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
                dev_warning(enic,
                        "Configuration of VLAN filter is not supported\n");
        }

        if ((mask & ETH_VLAN_EXTEND_MASK) &&
            (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
                dev_warning(enic,
                        "Configuration of extended VLAN is not supported\n");
        }

        return enic_set_vlan_strip(enic);
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
        int ret;
        int mask;
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        ret = enic_set_vnic_res(enic);
        if (ret) {
                dev_err(enic, "Set vNIC resource num failed, aborting\n");
                return ret;
        }

        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
                DEV_RX_OFFLOAD_CHECKSUM);
        /* All vlan offload masks to apply the current settings */
        mask = ETH_VLAN_STRIP_MASK |
                ETH_VLAN_FILTER_MASK |
                ETH_VLAN_EXTEND_MASK;
        ret = enicpmd_vlan_offload_set(eth_dev, mask);
        if (ret) {
                dev_err(enic, "Failed to configure VLAN offloads\n");
                return ret;
        }
        /*
         * Initialize RSS with the default reta and key. If the user key is
         * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
         * default key.
         */
        return enic_init_rss_nic_cfg(enic);
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
        struct rte_eth_link link;
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);

        memset(&link, 0, sizeof(link));
        rte_eth_linkstatus_set(eth_dev, &link);
}

/*
 * Stop device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
        __rte_unused int wait_to_complete)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_stats *stats)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        struct rte_eth_dev_info *device_info)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
        device_info->max_rx_queues = enic->conf_rq_count / 2;
        device_info->max_tx_queues = enic->conf_wq_count;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
        /* "Max" mtu is not a typo. HW receives packet sizes up to the
         * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
         * a hint to the driver to size receive buffers accordingly so that
         * larger-than-vnic-mtu packets get truncated. For DPDK, we let
         * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
         * ignoring vNIC mtu.
         */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
        device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
        device_info->rx_offload_capa = enic->rx_offload_capa;
        device_info->tx_offload_capa = enic->tx_offload_capa;
        device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
        device_info->reta_size = enic->reta_size;
        device_info->hash_key_size = enic->hash_key_size;
        device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
        device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = enic->config.rq_desc_count,
                .nb_min = ENIC_MIN_RQ_DESCS,
                .nb_align = ENIC_ALIGN_DESCS,
        };
        device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = enic->config.wq_desc_count,
                .nb_min = ENIC_MIN_WQ_DESCS,
                .nb_align = ENIC_ALIGN_DESCS,
                .nb_seg_max = ENIC_TX_XMIT_MAX,
                .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
        };
        device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
                .burst_size = ENIC_DEFAULT_RX_BURST,
                .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
                        ENIC_DEFAULT_RX_RING_SIZE),
                .nb_queues = ENIC_DEFAULT_RX_RINGS,
        };
        device_info->default_txportconf = (struct rte_eth_dev_portconf) {
                .burst_size = ENIC_DEFAULT_TX_BURST,
                .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
                        ENIC_DEFAULT_TX_RING_SIZE),
                .nb_queues = ENIC_DEFAULT_TX_RINGS,
        };
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L2_ETHER_VLAN,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_NONFRAG,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == enic_recv_pkts ||
            dev->rx_pkt_burst == enic_noscatter_recv_pkts)
                return ptypes;
        return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();

        enic->promisc = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->promisc = 0;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 1;
        enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 0;
        enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
        struct ether_addr *mac_addr,
        __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
        struct enic *enic = pmd_priv(eth_dev);

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return;

        ENICPMD_FUNC_TRACE();
        if (enic_del_mac_address(enic, index))
                dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
        struct ether_addr *addr)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;

        ENICPMD_FUNC_TRACE();
        ret = enic_del_mac_address(enic, 0);
        if (ret)
                return ret;
        return enic_set_mac_address(enic, addr->addr_bytes);
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        return enic_set_mtu(enic, mtu);
}

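/*
 * RSS redirection table (reta) helpers. The rte_eth API hands the table over
 * as groups of RTE_RETA_GROUP_SIZE entries with a validity mask per group,
 * while the vNIC stores it as byte-sized entries grouped four at a time,
 * hence the rss_cpu.cpu[i / 4].b[i % 4] indexing below. Hardware entries hold
 * SOP RQ indices, so they are converted to/from DPDK Rx queue indices with
 * enic_sop_rq_idx_to_rte_idx()/enic_rte_rq_idx_to_sop_idx().
 */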
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
        struct rte_eth_rss_reta_entry64
        *reta_conf,
        uint16_t reta_size)
{
        struct enic *enic = pmd_priv(dev);
        uint16_t i, idx, shift;

        ENICPMD_FUNC_TRACE();
        if (reta_size != ENIC_RSS_RETA_SIZE) {
                dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
                        reta_size, ENIC_RSS_RETA_SIZE);
                return -EINVAL;
        }

        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
                                enic->rss_cpu.cpu[i / 4].b[i % 4]);
        }

        return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_reta_entry64
        *reta_conf,
        uint16_t reta_size)
{
        struct enic *enic = pmd_priv(dev);
        union vnic_rss_cpu rss_cpu;
        uint16_t i, idx, shift;

        ENICPMD_FUNC_TRACE();
        if (reta_size != ENIC_RSS_RETA_SIZE) {
                dev_err(enic, "reta_update: wrong reta_size. given=%u"
                        " expected=%u\n",
                        reta_size, ENIC_RSS_RETA_SIZE);
                return -EINVAL;
        }
        /*
         * Start with the current reta and modify it per reta_conf, as we
         * need to push the entire reta even if we only modify one entry.
         */
        rss_cpu = enic->rss_cpu;
        for (i = 0; i < reta_size; i++) {
                idx = i / RTE_RETA_GROUP_SIZE;
                shift = i % RTE_RETA_GROUP_SIZE;
                if (reta_conf[idx].mask & (1ULL << shift))
                        rss_cpu.cpu[i / 4].b[i % 4] =
                                enic_rte_rq_idx_to_sop_idx(
                                        reta_conf[idx].reta[shift]);
        }
        return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        return enic_set_rss_conf(enic, rss_conf);
}

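/*
 * The vNIC keeps the RSS hash key in groups of ten bytes (key[i / 10].b[i % 10]
 * below), so the flat ENIC_RSS_HASH_KEY_SIZE-byte key reported to the
 * application is reassembled byte by byte. Callers that pass a key buffer
 * must size it for at least ENIC_RSS_HASH_KEY_SIZE bytes.
 */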
static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
{
        struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        if (rss_conf == NULL)
                return -EINVAL;
        if (rss_conf->rss_key != NULL &&
            rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
                dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
                        " expected=%u+\n",
                        rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
                return -EINVAL;
        }
        rss_conf->rss_hf = enic->rss_hf;
        if (rss_conf->rss_key != NULL) {
                int i;
                for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
                        rss_conf->rss_key[i] =
                                enic->rss_key.key[i / 10].b[i % 10];
                }
                rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
        }
        return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
        uint16_t rx_queue_id,
        struct rte_eth_rxq_info *qinfo)
{
        struct enic *enic = pmd_priv(dev);
        struct vnic_rq *rq_sop;
        struct vnic_rq *rq_data;
        struct rte_eth_rxconf *conf;
        uint16_t sop_queue_idx;
        uint16_t data_queue_idx;

        ENICPMD_FUNC_TRACE();
        sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
        data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
        rq_sop = &enic->rq[sop_queue_idx];
        rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
        qinfo->mp = rq_sop->mp;
        qinfo->scattered_rx = rq_sop->data_queue_enable;
        qinfo->nb_desc = rq_sop->ring.desc_count;
        if (qinfo->scattered_rx)
                qinfo->nb_desc += rq_data->ring.desc_count;
        conf = &qinfo->conf;
        memset(conf, 0, sizeof(*conf));
        conf->rx_free_thresh = rq_sop->rx_free_thresh;
        conf->rx_drop_en = 1;
        /*
         * Except VLAN stripping (port setting), all the checksum offloads
         * are always enabled.
         */
        conf->offloads = enic->rx_offload_capa;
        if (!enic->ig_vlan_strip_en)
                conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
        /* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
        uint16_t tx_queue_id,
        struct rte_eth_txq_info *qinfo)
{
        struct enic *enic = pmd_priv(dev);
        struct vnic_wq *wq = &enic->wq[tx_queue_id];

        ENICPMD_FUNC_TRACE();
        qinfo->nb_desc = wq->ring.desc_count;
        memset(&qinfo->conf, 0, sizeof(qinfo->conf));
        qinfo->conf.offloads = wq->offloads;
        /* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
        uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
        return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
        uint16_t rx_queue_id)
{
        struct enic *enic = pmd_priv(eth_dev);

        ENICPMD_FUNC_TRACE();
        vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
        return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
        struct rte_eth_udp_tunnel *tnl)
{
        if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
                return -ENOTSUP;
        if (!enic->overlay_offload) {
                PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
                        "supported\n");
                return -ENOTSUP;
        }
        return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
        if (vnic_dev_overlay_offload_cfg(enic->vdev,
                OVERLAY_CFG_VXLAN_PORT_UPDATE,
                port)) {
                PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
                return -EINVAL;
        }
        PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
        enic->vxlan_port = port;
        return 0;
}

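/*
 * VXLAN port add/del handlers. A minimal sketch of how an application would
 * reach these (hypothetical application code, not part of this driver):
 *
 *     struct rte_eth_udp_tunnel tnl = {
 *             .udp_port = 4789,
 *             .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *     };
 *     rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 *
 * Because the NIC exposes a single configurable VXLAN port, "add" overwrites
 * the current port and "del" restores ENIC_DEFAULT_VXLAN_PORT.
 */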
static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
        struct rte_eth_udp_tunnel *tnl)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret;

        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
        /*
         * The NIC has 1 configurable VXLAN port number. "Adding" a new port
         * number replaces it.
         */
        if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
                PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
                        tnl->udp_port);
                return -EINVAL;
        }
        return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
        struct rte_eth_udp_tunnel *tnl)
{
        struct enic *enic = pmd_priv(eth_dev);
        int ret;

        ENICPMD_FUNC_TRACE();
        ret = udp_tunnel_common_check(enic, tnl);
        if (ret)
                return ret;
        /*
         * Clear the previously set port number and restore the
         * hardware default port number. Some drivers disable VXLAN
         * offloads when there are no configured port numbers. But
         * enic does not do that as VXLAN is part of overlay offload,
         * which is tied to inner RSS and TSO.
         */
        if (tnl->udp_port != enic->vxlan_port) {
                PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
                        tnl->udp_port);
                return -EINVAL;
        }
        return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure = enicpmd_dev_configure,
        .dev_start = enicpmd_dev_start,
        .dev_stop = enicpmd_dev_stop,
        .dev_set_link_up = NULL,
        .dev_set_link_down = NULL,
        .dev_close = enicpmd_dev_close,
        .promiscuous_enable = enicpmd_dev_promiscuous_enable,
        .promiscuous_disable = enicpmd_dev_promiscuous_disable,
        .allmulticast_enable = enicpmd_dev_allmulticast_enable,
        .allmulticast_disable = enicpmd_dev_allmulticast_disable,
        .link_update = enicpmd_dev_link_update,
        .stats_get = enicpmd_dev_stats_get,
        .stats_reset = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get = enicpmd_dev_info_get,
        .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set = enicpmd_mtu_set,
        .vlan_filter_set = NULL,
        .vlan_tpid_set = NULL,
        .vlan_offload_set = enicpmd_vlan_offload_set,
        .vlan_strip_queue_set = NULL,
        .rx_queue_start = enicpmd_dev_rx_queue_start,
        .rx_queue_stop = enicpmd_dev_rx_queue_stop,
        .tx_queue_start = enicpmd_dev_tx_queue_start,
        .tx_queue_stop = enicpmd_dev_tx_queue_stop,
        .rx_queue_setup = enicpmd_dev_rx_queue_setup,
        .rx_queue_release = enicpmd_dev_rx_queue_release,
        .rx_queue_count = enicpmd_dev_rx_queue_count,
        .rx_descriptor_done = NULL,
        .tx_queue_setup = enicpmd_dev_tx_queue_setup,
        .tx_queue_release = enicpmd_dev_tx_queue_release,
        .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
        .rxq_info_get = enicpmd_dev_rxq_info_get,
        .txq_info_get = enicpmd_dev_txq_info_get,
        .dev_led_on = NULL,
        .dev_led_off = NULL,
        .flow_ctrl_get = NULL,
        .flow_ctrl_set = NULL,
        .priority_flow_ctrl_set = NULL,
        .mac_addr_add = enicpmd_add_mac_addr,
        .mac_addr_remove = enicpmd_remove_mac_addr,
        .mac_addr_set = enicpmd_set_mac_addr,
        .filter_ctrl = enicpmd_dev_filter_ctrl,
        .reta_query = enicpmd_dev_rss_reta_query,
        .reta_update = enicpmd_dev_rss_reta_update,
        .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
        .rss_hash_update = enicpmd_dev_rss_hash_update,
        .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};

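/*
 * Device argument parsing. The two callbacks below are invoked through
 * rte_kvargs_process() in enic_check_devargs() for the
 * ENIC_DEVARG_DISABLE_OVERLAY and ENIC_DEVARG_IG_VLAN_REWRITE keys; each one
 * validates its value string and records the result in the enic instance.
 */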
static int enic_parse_disable_overlay(__rte_unused const char *key,
        const char *value,
        void *opaque)
{
        struct enic *enic;

        enic = (struct enic *)opaque;
        if (strcmp(value, "0") == 0) {
                enic->disable_overlay = false;
        } else if (strcmp(value, "1") == 0) {
                enic->disable_overlay = true;
        } else {
                dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
                        ": expected=0|1 given=%s\n", value);
                return -EINVAL;
        }
        return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
        const char *value,
        void *opaque)
{
        struct enic *enic;

        enic = (struct enic *)opaque;
        if (strcmp(value, "trunk") == 0) {
                /* Trunk mode: always tag */
                enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
        } else if (strcmp(value, "untag") == 0) {
                /* Untag default VLAN mode: untag if VLAN = default VLAN */
                enic->ig_vlan_rewrite_mode =
                        IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
        } else if (strcmp(value, "priority") == 0) {
                /*
                 * Priority-tag default VLAN mode: priority tag (VLAN header
                 * with ID=0) if VLAN = default
                 */
                enic->ig_vlan_rewrite_mode =
                        IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
        } else if (strcmp(value, "pass") == 0) {
                /* Pass through mode: do not touch tags */
                enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
        } else {
                dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
                        ": expected=trunk|untag|priority|pass given=%s\n",
                        value);
                return -EINVAL;
        }
        return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
        static const char *const valid_keys[] = {
                ENIC_DEVARG_DISABLE_OVERLAY,
                ENIC_DEVARG_IG_VLAN_REWRITE,
                NULL};
        struct enic *enic = pmd_priv(dev);
        struct rte_kvargs *kvlist;

        ENICPMD_FUNC_TRACE();

        enic->disable_overlay = false;
        enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
        if (!dev->device->devargs)
                return 0;
        kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
        if (!kvlist)
                return -EINVAL;
        if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
                        enic_parse_disable_overlay, enic) < 0 ||
            rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
                        enic_parse_ig_vlan_rewrite, enic) < 0) {
                rte_kvargs_free(kvlist);
                return -EINVAL;
        }
        rte_kvargs_free(kvlist);
        return 0;
}

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
        struct enic *enic = pmd_priv(eth_dev);
        int err;

        ENICPMD_FUNC_TRACE();

        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
        eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enic_xmit_pkts;
        eth_dev->tx_pkt_prepare = &enic_prep_pkts;

        pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pdev);
        enic->pdev = pdev;
        addr = &pdev->addr;

        snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
                addr->domain, addr->bus, addr->devid, addr->function);

        err = enic_check_devargs(eth_dev);
        if (err)
                return err;
        return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
                eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
        .id_table = pci_id_enic_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        .probe = eth_enic_pci_probe,
        .remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
        ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
        ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
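/*
 * Example devargs usage (hypothetical PCI address), matching the parameter
 * string registered above:
 *
 *     -w 0000:0b:00.0,disable-overlay=1,ig-vlan-rewrite=untag
 */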