/*
 * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
	RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enic_map[] = {
#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#ifndef PCI_VENDOR_ID_CISCO
#define PCI_VENDOR_ID_CISCO	0x1137
#endif
#include "rte_pci_dev_ids.h"
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET)
RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
{.vendor_id = 0, /* Sentinel */},
};

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
		enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
	case RTE_ETH_FILTER_INFO:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		enum rte_filter_type filter_type,
		enum rte_filter_op filter_op,
		void *arg)
{
	struct enic *enic = pmd_priv(dev);
	int ret = -EINVAL;

	if (RTE_ETH_FILTER_FDIR == filter_type)
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
	else
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);

	return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	enic_free_wq(txq);
}

static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}

	if (enic->cq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	enic_free_rq(rxq);
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
rq\n"); 264 return ret; 265 } 266 267 return enicpmd_dev_setup_intr(enic); 268 } 269 270 static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev, 271 uint16_t vlan_id, int on) 272 { 273 struct enic *enic = pmd_priv(eth_dev); 274 275 ENICPMD_FUNC_TRACE(); 276 if (on) 277 enic_add_vlan(enic, vlan_id); 278 else 279 enic_del_vlan(enic, vlan_id); 280 return 0; 281 } 282 283 static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) 284 { 285 struct enic *enic = pmd_priv(eth_dev); 286 287 ENICPMD_FUNC_TRACE(); 288 289 if (mask & ETH_VLAN_STRIP_MASK) { 290 if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) 291 enic->ig_vlan_strip_en = 1; 292 else 293 enic->ig_vlan_strip_en = 0; 294 } 295 enic_set_rss_nic_cfg(enic); 296 297 298 if (mask & ETH_VLAN_FILTER_MASK) { 299 dev_warning(enic, 300 "Configuration of VLAN filter is not supported\n"); 301 } 302 303 if (mask & ETH_VLAN_EXTEND_MASK) { 304 dev_warning(enic, 305 "Configuration of extended VLAN is not supported\n"); 306 } 307 } 308 309 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev) 310 { 311 int ret; 312 struct enic *enic = pmd_priv(eth_dev); 313 314 ENICPMD_FUNC_TRACE(); 315 ret = enic_set_vnic_res(enic); 316 if (ret) { 317 dev_err(enic, "Set vNIC resource num failed, aborting\n"); 318 return ret; 319 } 320 321 if (eth_dev->data->dev_conf.rxmode.split_hdr_size && 322 eth_dev->data->dev_conf.rxmode.header_split) { 323 /* Enable header-data-split */ 324 enic_set_hdr_split_size(enic, 325 eth_dev->data->dev_conf.rxmode.split_hdr_size); 326 } 327 328 enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum; 329 return 0; 330 } 331 332 /* Start the device. 333 * It returns 0 on success. 334 */ 335 static int enicpmd_dev_start(struct rte_eth_dev *eth_dev) 336 { 337 struct enic *enic = pmd_priv(eth_dev); 338 339 ENICPMD_FUNC_TRACE(); 340 return enic_enable(enic); 341 } 342 343 /* 344 * Stop device: disable rx and tx functions to allow for reconfiguring. 345 */ 346 static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev) 347 { 348 struct rte_eth_link link; 349 struct enic *enic = pmd_priv(eth_dev); 350 351 ENICPMD_FUNC_TRACE(); 352 enic_disable(enic); 353 memset(&link, 0, sizeof(link)); 354 rte_atomic64_cmpset((uint64_t *)ð_dev->data->dev_link, 355 *(uint64_t *)ð_dev->data->dev_link, 356 *(uint64_t *)&link); 357 } 358 359 /* 360 * Stop device. 
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;
	int link_status = 0;

	ENICPMD_FUNC_TRACE();
	link_status = enic_get_link_status(enic);
	ret = (link_status == enic->link_status);
	enic->link_status = link_status;
	eth_dev->data->dev_link.link_status = link_status;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	return ret;
}

static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->max_rx_queues = enic->rq_count;
	device_info->max_tx_queues = enic->wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	device_info->max_rx_pktlen = enic->config.mtu;
	device_info->max_mac_addrs = 1;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev,
	__rte_unused uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic);
}

/* Transmit burst: post each packet's segments to the work queue,
 * reclaiming completed descriptors and retrying once when the queue
 * runs short. Returns the number of packets fully posted.
 */
static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	unsigned int index;
	unsigned int frags;
	unsigned int pkt_len;
	unsigned int seg_len;
	unsigned int inc_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt, *next_tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	unsigned short ol_flags;
	uint8_t last_seg, eop;

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		inc_len = 0;
		nb_segs = tx_pkt->nb_segs;
		if (nb_segs > vnic_wq_desc_avail(wq)) {
			if (index > 0)
				enic_post_wq_index(wq);

			/* wq cleanup and try again */
			if (!enic_cleanup_wq(enic, wq) ||
				(nb_segs > vnic_wq_desc_avail(wq))) {
				return index;
			}
		}
		pkt_len = tx_pkt->pkt_len;
		vlan_id = tx_pkt->vlan_tci;
		ol_flags = tx_pkt->ol_flags;
		for (frags = 0; inc_len < pkt_len; frags++) {
			if (!tx_pkt)
				break;
			next_tx_pkt = tx_pkt->next;
			seg_len = tx_pkt->data_len;
			inc_len += seg_len;
			eop = (pkt_len == inc_len) || (!next_tx_pkt);
			last_seg = eop &&
				(index == ((unsigned int)nb_pkts - 1));
			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
				!frags, eop, last_seg, ol_flags, vlan_id);
			tx_pkt = next_tx_pkt;
		}
	}

	enic_cleanup_wq(enic, wq);
	return index;
}

/* Receive burst: poll the receive queue for up to nb_pkts packets. */
static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
	struct enic *enic = vnic_dev_priv(rq->vdev);
	unsigned int work_done;

	if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
		dev_err(enic, "error in enicpmd poll\n");

	return work_done;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.mtu_set = NULL,
	.vlan_filter_set = enicpmd_vlan_filter_set,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = NULL,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
};

struct enic *enicpmd_list_head = NULL;

/* Initialize the driver.
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
	eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;

	pdev = eth_dev->pci_dev;
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}

static struct eth_driver rte_enic_pmd = {
	.pci_drv = {
		.name = "rte_enic_pmd",
		.id_table = pci_id_enic_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	},
	.eth_dev_init = eth_enicpmd_dev_init,
	.dev_private_size = sizeof(struct enic),
};

/* Driver initialization routine.
 * Invoked once at EAL init time.
 * Register as the Poll Mode Driver for Cisco ENIC devices.
 */
static int
rte_enic_pmd_init(const char *name __rte_unused,
	const char *params __rte_unused)
{
	ENICPMD_FUNC_TRACE();

	rte_eth_driver_register(&rte_enic_pmd);
	return 0;
}

static struct rte_driver rte_enic_driver = {
	.type = PMD_PDEV,
	.init = rte_enic_pmd_init,
};

PMD_REGISTER_DRIVER(rte_enic_driver);