/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
	RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enic_map[] = {
#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#ifndef PCI_VENDOR_ID_CISCO
#define PCI_VENDOR_ID_CISCO	0x1137
#endif
#include "rte_pci_dev_ids.h"
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
{.vendor_id = 0, /* Sentinel */},
};

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_INFO:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = -EINVAL;

	if (RTE_ETH_FILTER_FDIR == filter_type)
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
	else
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);

	return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	enic_free_wq(txq);
}

/* Allocate interrupt resources and finish vNIC setup, but only once every
 * completion queue has been initialized; until then, return 0 and defer.
 */
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}

	if (enic->cq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (queue_idx >= ENIC_WQ_MAX) {
		dev_err(enic,
			"Max number of TX queues exceeded.  Max is %d\n",
			ENIC_WQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);
	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);
	else
		eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);
	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);
	else
		eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	enic_free_rq(rxq);
}

static int
enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (queue_idx >= ENIC_RQ_MAX) {
		dev_err(enic,
			"Max number of RX queues exceeded.  Max is %d\n",
			ENIC_RQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
			enic->rq[queue_idx].rx_free_thresh);

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
	uint16_t vlan_id, int on)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	if (on)
		err = enic_add_vlan(enic, vlan_id);
	else
		err = enic_del_vlan(enic, vlan_id);
	return err;
}

static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
		eth_dev->data->dev_conf.rxmode.header_split) {
		/* Enable header-data-split */
		enic_set_hdr_split_size(enic,
			eth_dev->data->dev_conf.rxmode.split_hdr_size);
	}

	enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
	return 0;
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);
	memset(&link, 0, sizeof(link));
	rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&link);
}

/*
 * Close the device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;
	int link_status = 0;

	ENICPMD_FUNC_TRACE();
	link_status = enic_get_link_status(enic);
	ret = (link_status == enic->link_status);
	enic->link_status = link_status;
	eth_dev->data->dev_link.link_status = link_status;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	return ret;
}

static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->max_rx_queues = enic->rq_count;
	device_info->max_tx_queues = enic->wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	device_info->max_rx_pktlen = enic->config.mtu;
	device_info->max_mac_addrs = 1;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void
enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic);
}

/* TX burst handler: posts one WQ descriptor per mbuf segment via
 * enic_send_pkt() and reclaims completed descriptors with enic_cleanup_wq()
 * when the work queue runs short and once at the end of the burst.
 */
static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int frags;
	unsigned int pkt_len;
	unsigned int seg_len;
	unsigned int inc_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt, *next_tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	unsigned short ol_flags;
	uint8_t last_seg, eop;
	unsigned int host_tx_descs = 0;

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		inc_len = 0;
		nb_segs = tx_pkt->nb_segs;
		if (nb_segs > vnic_wq_desc_avail(wq)) {
			if (index > 0)
				enic_post_wq_index(wq);

			/* wq cleanup and try again */
			if (!enic_cleanup_wq(enic, wq) ||
				(nb_segs > vnic_wq_desc_avail(wq))) {
				return index;
			}
		}

		pkt_len = tx_pkt->pkt_len;
		vlan_id = tx_pkt->vlan_tci;
		ol_flags = tx_pkt->ol_flags;
		for (frags = 0; inc_len < pkt_len; frags++) {
			if (!tx_pkt)
				break;
			next_tx_pkt = tx_pkt->next;
			seg_len = tx_pkt->data_len;
			inc_len += seg_len;

			host_tx_descs++;
			last_seg = 0;
			eop = 0;
			if ((pkt_len == inc_len) || !next_tx_pkt) {
				eop = 1;
				/* post if last packet in batch or > thresh */
				if ((index == (nb_pkts - 1)) ||
				   (host_tx_descs > ENIC_TX_POST_THRESH)) {
					last_seg = 1;
					host_tx_descs = 0;
				}
			}
			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
				      !frags, eop, last_seg, ol_flags, vlan_id);
			tx_pkt = next_tx_pkt;
		}
	}

	enic_cleanup_wq(enic, wq);
	return index;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = NULL,
	.vlan_filter_set      = enicpmd_vlan_filter_set,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = NULL,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
};

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;

	pdev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}

static struct eth_driver rte_enic_pmd = {
	.pci_drv = {
		.name = "rte_enic_pmd",
		.id_table = pci_id_enic_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_enicpmd_dev_init,
	.dev_private_size = sizeof(struct enic),
};

/* Driver initialization routine.
 * Invoked once at EAL init time.
 * Register as the [Poll Mode] Driver of Cisco ENIC device.
 */
static int
rte_enic_pmd_init(__rte_unused const char *name,
	__rte_unused const char *params)
{
	ENICPMD_FUNC_TRACE();

	rte_eth_driver_register(&rte_enic_pmd);
	return 0;
}

static struct rte_driver rte_enic_driver = {
	.type = PMD_PDEV,
	.init = rte_enic_pmd_init,
};

PMD_REGISTER_DRIVER(rte_enic_driver);
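
/*
 * Illustrative usage sketch (comment only, not part of the driver build):
 * one way an application of this DPDK generation might bring up a port
 * bound to this PMD through the generic ethdev API. Port 0, a single
 * RX/TX queue pair, 512 descriptors and the mempool sizing below are
 * arbitrary assumptions for the example, not values the enic PMD requires.
 *
 *	#include <rte_eal.h>
 *	#include <rte_lcore.h>
 *	#include <rte_mbuf.h>
 *	#include <rte_ethdev.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct rte_eth_conf conf = { .rxmode = { .hw_ip_checksum = 1 } };
 *		struct rte_mempool *mp;
 *		struct rte_mbuf *pkts[32];
 *		uint16_t i, nb;
 *
 *		if (rte_eal_init(argc, argv) < 0 || rte_eth_dev_count() == 0)
 *			return -1;
 *		mp = rte_pktmbuf_pool_create("mbufs", 8192, 256, 0,
 *			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *		if (mp == NULL)
 *			return -1;
 *
 *		rte_eth_dev_configure(0, 1, 1, &conf);
 *		rte_eth_rx_queue_setup(0, 0, 512, rte_socket_id(), NULL, mp);
 *		rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), NULL);
 *		rte_eth_dev_start(0);
 *
 *		for (;;) {
 *			nb = rte_eth_rx_burst(0, 0, pkts, 32);
 *			for (i = 0; i < nb; i++)
 *				rte_pktmbuf_free(pkts[i]);
 *		}
 *		return 0;
 *	}
 *
 * On such a port, rte_eth_rx_burst() and rte_eth_tx_burst() dispatch to
 * enic_recv_pkts() and enicpmd_xmit_pkts(), the burst functions registered
 * in eth_enicpmd_dev_init() above.
 */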