/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <fcntl.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "base/vmxnet3_defs.h"

#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define PROCESS_SYS_EVENTS 0

#define VMXNET3_TX_MAX_SEG	UINT8_MAX

static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				  struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
				 struct ether_addr *mac_addr);

#if PROCESS_SYS_EVENTS == 1
static void vmxnet3_process_events(struct vmxnet3_hw *);
#endif

/*
 * The set of PCI devices this driver supports
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure = vmxnet3_dev_configure,
	.dev_start = vmxnet3_dev_start,
	.dev_stop = vmxnet3_dev_stop,
	.dev_close = vmxnet3_dev_close,
	.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update = vmxnet3_dev_link_update,
	.stats_get = vmxnet3_dev_stats_get,
	.mac_addr_set = vmxnet3_mac_addr_set,
	.dev_infos_get = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release = vmxnet3_dev_tx_queue_release,
};

static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id,
		 uint16_t align, bool reuse)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%d_%s",
		 dev->data->drv_name, dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (!reuse) {
		if (mz)
			rte_memzone_free(mz);
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
						   0, align);
	}

	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}

/**
 * Atomically reads the link status information from the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   Pointer to the buffer where the link status will be saved.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}
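/*
 * The read/write helpers above and below treat struct rte_eth_link as a
 * single 64-bit word and copy it with one compare-and-set, so readers can
 * never observe a half-updated link status. A minimal sketch of the same
 * pattern, assuming the link struct fits in 64 bits (illustrative only,
 * not part of the driver):
 *
 *	RTE_BUILD_BUG_ON(sizeof(struct rte_eth_link) > sizeof(uint64_t));
 *
 *	uint64_t *dst = (uint64_t *)&dev->data->dev_link;
 *	uint64_t old = *dst;
 *	rte_atomic64_cmpset(dst, old, *(uint64_t *)&new_link);
 */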
/**
 * Atomically writes the link status information into the global
 * structure rte_eth_dev.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/*
 * Disable all device interrupts. Based on the function of the same name
 * in the Linux vmxnet3 driver.
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}

/*
 * Gets tx data ring descriptor size.
 */
static uint16_t
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
{
	uint16 txdata_desc_size;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	/* Fall back to the default size if the device reports an invalid one */
	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
}

/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	/*
	 * For secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
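	/*
	 * Note on the negotiation above: VMXNET3_REG_VRRS exposes one bit
	 * per device revision the emulation supports, and the driver
	 * acknowledges the highest revision it also understands by writing
	 * that single bit back. An equivalent check, as a hypothetical
	 * helper (illustrative only, not part of this file):
	 *
	 *	static inline int
	 *	vmxnet3_rev_supported(uint32_t vrrs, uint32_t rev)
	 *	{
	 *		return (vrrs & (1 << rev)) != 0;
	 *	}
	 */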
	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	return 0;
}

static int
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (hw->adapter_stopped == 0)
		vmxnet3_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	return 0;
}

static struct eth_driver rte_vmxnet3_pmd = {
	.pci_drv = {
		.id_table = pci_id_vmxnet3_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_vmxnet3_dev_init,
	.eth_dev_uninit = eth_vmxnet3_dev_uninit,
	.dev_private_size = sizeof(struct vmxnet3_hw),
};

static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
		return -EINVAL;
	}

	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
	       dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
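	/*
	 * Rough layout of the "queuedesc" memzone reserved below, with
	 * T = hw->num_tx_queues and R = hw->num_rx_queues (illustrative
	 * diagram only):
	 *
	 *	hw->tqd_start -> | Vmxnet3_TxQueueDesc[0 .. T-1] |
	 *	hw->rqd_start -> | Vmxnet3_RxQueueDesc[0 .. R-1] |
	 *
	 * which is why rqd_start is derived as tqd_start + T further down.
	 */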
	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->phys_addr;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket.
	 *
	 * We cannot reuse this memzone from a previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->phys_addr;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->phys_addr;
	}

	return 0;
}

static void
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
{
	uint32_t val;

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     addr[0], addr[1], addr[2],
		     addr[3], addr[4], addr[5]);

	/* Lower four bytes of the MAC go to MACL, the remaining two to MACH */
	val = *(const uint32_t *)addr;
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	val = (addr[5] << 8) | addr[4];
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}

static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->phys_addr;
	}

	num = hw->num_rx_queues;

	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
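	/*
	 * Worked example for the dedup loop below (illustrative): with three
	 * RX queues where queues 0 and 2 share mempool A and queue 1 uses
	 * mempool B, index[] starts as { 0x1, 0x2, 0x4 } and the loop leaves
	 *
	 *	mp[]    = { A,   B,   NULL }
	 *	index[] = { 0x5, 0x2, -    }
	 *
	 * i.e. one region for A tagged with the bits of queues 0 and 2, and
	 * one region for B.
	 */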
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	/* Vmxnet3_MemRegs already contains one Vmxnet3_MemoryRegion */
	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}

static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint32_t i;
	int ret;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;

	/*
	 * Set number of interrupts to 1. The PMD disables all interrupts,
	 * but at least one must be advertised to activate the device, since
	 * it is needed to handle link events. It is disabled again after
	 * device activation if needed.
	 */
	devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];

		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx = txq->comp_ring.intr_idx;
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}
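	/*
	 * Descriptive note (added for clarity): each RX queue is backed by
	 * two command rings plus one completion ring; ring 0 supplies the
	 * buffers packets start in, while ring 1 provides additional body
	 * buffers for multi-buffer receives. The loop below publishes the
	 * base addresses and sizes of those rings to the device.
	 */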
	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		}
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* RxMode: start with all VMXNET3_RXM_xxx bits cleared */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (dev->data->dev_conf.rxmode.enable_lro) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;
	}

	vmxnet3_dev_vlan_offload_set(dev,
				     ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);

	vmxnet3_write_mac(hw, hw->perm_addr);

	return VMXNET3_SUCCESS;
}

/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init(); otherwise it may fail.
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -EINVAL;
	}

	/* Setup memory region for rx buffers */
	ret = vmxnet3_dev_setup_memreg(dev);
	if (ret == 0) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		if (ret != 0)
			PMD_INIT_LOG(DEBUG,
				     "Failed in setup memory region cmd\n");
		ret = 0;
	} else {
		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
		return ret;
	}

	hw->adapter_stopped = FALSE;

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	/*
	 * Don't need to handle events for now
	 */
#if PROCESS_SYS_EVENTS == 1
	uint32_t events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);

	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
	vmxnet3_process_events(hw);
#endif
	return VMXNET3_SUCCESS;
}
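/*
 * For context, the start path above is driven by the usual ethdev calls
 * from an application; a minimal sketch (illustrative only, error handling
 * omitted, "mbuf_pool" is an assumed pre-created mempool):
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL,
 *			       mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);	<-- ends up in vmxnet3_dev_start()
 */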
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);
}

/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	hw->adapter_stopped = 1;
}

static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;

		stats->q_opackets[i] = txStats->ucastPktsTxOK +
			txStats->mcastPktsTxOK +
			txStats->bcastPktsTxOK;
		stats->q_obytes[i] = txStats->ucastBytesTxOK +
			txStats->mcastBytesTxOK +
			txStats->bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;

		stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
			rxStats->mcastPktsRxOK +
			rxStats->bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
			rxStats->mcastBytesRxOK +
			rxStats->bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats->pktsRxError;
		stats->ierrors += rxStats->pktsRxError;
		stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
	}
}
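/*
 * Callers reach the per-queue accumulation above through the generic
 * rte_eth_stats_get() API; a minimal sketch (illustrative only):
 *
 *	struct rte_eth_stats stats;
 *
 *	rte_eth_stats_get(port_id, &stats);
 *	printf("rx: %" PRIu64 " pkts, no-mbuf drops: %" PRIu64 "\n",
 *	       stats.ipackets, stats.rx_nombuf);
 */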
static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);

	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
		.nb_align = 1,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_align = 1,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
	};

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_TCP_LRO;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
}

static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
		return ptypes;
	return NULL;
}

static void
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
}

/* return 0 means link status changed, -1 means not changed */
static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			__rte_unused int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link old, link;
	uint32_t ret;

	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)
		return -1;

	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_read_link_status(dev, &old);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1) {
		link.link_status = ETH_LINK_UP;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_SPEED_NUM_10G;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	vmxnet3_dev_atomic_write_link_status(dev, &link);

	return (old.link_status == link.link_status) ? -1 : 0;
}
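/*
 * Applications normally poll this through the ethdev API, passing
 * wait_to_complete = 0 via the nowait variant; a minimal sketch
 * (illustrative only):
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u up at %u Mbps\n", port_id, link.link_speed);
 */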
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
{
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

	if (set)
		rxConf->rxMode = rxConf->rxMode | feature;
	else
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/* Promiscuous mode can only be configured once Vmxnet3_DriverShared is initialized in the adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Promiscuous mode can only be configured once Vmxnet3_DriverShared is initialized in the adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Allmulticast mode can only be configured once Vmxnet3_DriverShared is initialized in the adapter */
static void
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}

/* Allmulticast mode can only be configured once Vmxnet3_DriverShared is initialized in the adapter */
static void
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}

/* Enable/disable filter on vlan */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}
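/*
 * The VLAN filter table (vfTable/shadow_vfta) is a 4096-bit bitmap, one bit
 * per VLAN ID, stored as an array of 32-bit words. The SET/CLEAR macros used
 * above boil down to ordinary bitmap arithmetic; an equivalent open-coded
 * form, assuming that layout (illustrative only):
 *
 *	vf_table[vid >> 5] |= UINT32_C(1) << (vid & 31);	set vid
 *	vf_table[vid >> 5] &= ~(UINT32_C(1) << (vid & 31));	clear vid
 */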
static void
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
		else
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		else
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}
}

#if PROCESS_SYS_EVENTS == 1
static void
vmxnet3_process_events(struct vmxnet3_hw *hw)
{
	uint32_t events = hw->shared->ecr;

	if (!events) {
		PMD_INIT_LOG(ERR, "No events to process");
		return;
	}

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		PMD_INIT_LOG(ERR,
			     "Process events in %s(): VMXNET3_ECR_LINK event",
			     __func__);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "tq error 0x%x",
				     hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "rq error 0x%x",
				     hw->rqd_start->status.error);

		/* Have to reset the device */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_INIT_LOG(ERR, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_INIT_LOG(ERR, "Debug event generated by device.");
}
#endif

RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");