1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 #include <sys/queue.h> 35 #include <stdio.h> 36 #include <errno.h> 37 #include <stdint.h> 38 #include <string.h> 39 #include <unistd.h> 40 #include <stdarg.h> 41 #include <fcntl.h> 42 #include <inttypes.h> 43 #include <rte_byteorder.h> 44 #include <rte_common.h> 45 #include <rte_cycles.h> 46 47 #include <rte_interrupts.h> 48 #include <rte_log.h> 49 #include <rte_debug.h> 50 #include <rte_pci.h> 51 #include <rte_atomic.h> 52 #include <rte_branch_prediction.h> 53 #include <rte_memory.h> 54 #include <rte_memzone.h> 55 #include <rte_eal.h> 56 #include <rte_alarm.h> 57 #include <rte_ether.h> 58 #include <rte_ethdev.h> 59 #include <rte_atomic.h> 60 #include <rte_string_fns.h> 61 #include <rte_malloc.h> 62 #include <rte_dev.h> 63 64 #include "base/vmxnet3_defs.h" 65 66 #include "vmxnet3_ring.h" 67 #include "vmxnet3_logs.h" 68 #include "vmxnet3_ethdev.h" 69 70 #define PROCESS_SYS_EVENTS 0 71 72 #define VMXNET3_TX_MAX_SEG UINT8_MAX 73 74 static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev); 75 static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev); 76 static int vmxnet3_dev_configure(struct rte_eth_dev *dev); 77 static int vmxnet3_dev_start(struct rte_eth_dev *dev); 78 static void vmxnet3_dev_stop(struct rte_eth_dev *dev); 79 static void vmxnet3_dev_close(struct rte_eth_dev *dev); 80 static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set); 81 static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev); 82 static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev); 83 static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev); 84 static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev); 85 static int vmxnet3_dev_link_update(struct rte_eth_dev *dev, 86 int wait_to_complete); 87 static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev, 88 struct rte_eth_stats *stats); 89 static void vmxnet3_dev_info_get(struct rte_eth_dev *dev, 90 struct rte_eth_dev_info *dev_info); 91 static const uint32_t * 92 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev); 93 static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, 94 uint16_t vid, int on); 95 static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask); 96 static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev, 97 struct ether_addr *mac_addr); 98 99 #if PROCESS_SYS_EVENTS == 1 100 static void vmxnet3_process_events(struct vmxnet3_hw *); 101 #endif 102 /* 103 * The set of PCI devices this driver supports 104 */ 105 #define VMWARE_PCI_VENDOR_ID 0x15AD 106 #define VMWARE_DEV_ID_VMXNET3 0x07B0 107 static const struct rte_pci_id pci_id_vmxnet3_map[] = { 108 { RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) }, 109 { .vendor_id = 0, /* sentinel */ }, 110 }; 111 112 static const struct eth_dev_ops vmxnet3_eth_dev_ops = { 113 .dev_configure = vmxnet3_dev_configure, 114 .dev_start = vmxnet3_dev_start, 115 .dev_stop = vmxnet3_dev_stop, 116 .dev_close = vmxnet3_dev_close, 117 .promiscuous_enable = vmxnet3_dev_promiscuous_enable, 118 .promiscuous_disable = vmxnet3_dev_promiscuous_disable, 119 .allmulticast_enable = vmxnet3_dev_allmulticast_enable, 120 .allmulticast_disable = vmxnet3_dev_allmulticast_disable, 121 .link_update = vmxnet3_dev_link_update, 122 .stats_get = vmxnet3_dev_stats_get, 123 .mac_addr_set = vmxnet3_mac_addr_set, 124 .dev_infos_get = vmxnet3_dev_info_get, 125 .dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get, 126 .vlan_filter_set = vmxnet3_dev_vlan_filter_set, 127 .vlan_offload_set = vmxnet3_dev_vlan_offload_set, 128 .rx_queue_setup = vmxnet3_dev_rx_queue_setup, 129 .rx_queue_release = vmxnet3_dev_rx_queue_release, 130 .tx_queue_setup = vmxnet3_dev_tx_queue_setup, 131 .tx_queue_release = vmxnet3_dev_tx_queue_release, 132 }; 133 134 static const struct rte_memzone * 135 gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size, 136 const char *post_string, int socket_id, 137 uint16_t align, bool reuse) 138 { 139 char z_name[RTE_MEMZONE_NAMESIZE]; 140 const struct rte_memzone *mz; 141 142 snprintf(z_name, sizeof(z_name), "%s_%d_%s", 143 dev->data->drv_name, dev->data->port_id, post_string); 144 145 mz = rte_memzone_lookup(z_name); 146 if (!reuse) { 147 if (mz) 148 rte_memzone_free(mz); 149 return rte_memzone_reserve_aligned(z_name, size, socket_id, 150 0, align); 151 } 152 153 if (mz) 154 return mz; 155 156 return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align); 157 } 158 159 /** 160 * Atomically reads the link status information from global 161 * structure rte_eth_dev. 162 * 163 * @param dev 164 * - Pointer to the structure rte_eth_dev to read from. 165 * - Pointer to the buffer to be saved with the link status. 166 * 167 * @return 168 * - On success, zero. 169 * - On failure, negative value. 170 */ 171 172 static int 173 vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev, 174 struct rte_eth_link *link) 175 { 176 struct rte_eth_link *dst = link; 177 struct rte_eth_link *src = &(dev->data->dev_link); 178 179 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 180 *(uint64_t *)src) == 0) 181 return -1; 182 183 return 0; 184 } 185 186 /** 187 * Atomically writes the link status information into global 188 * structure rte_eth_dev. 189 * 190 * @param dev 191 * - Pointer to the structure rte_eth_dev to write to. 192 * - Pointer to the buffer to be saved with the link status. 193 * 194 * @return 195 * - On success, zero. 196 * - On failure, negative value. 197 */ 198 static int 199 vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev, 200 struct rte_eth_link *link) 201 { 202 struct rte_eth_link *dst = &(dev->data->dev_link); 203 struct rte_eth_link *src = link; 204 205 if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, 206 *(uint64_t *)src) == 0) 207 return -1; 208 209 return 0; 210 } 211 212 /* 213 * This function is based on vmxnet3_disable_intr() 214 */ 215 static void 216 vmxnet3_disable_intr(struct vmxnet3_hw *hw) 217 { 218 int i; 219 220 PMD_INIT_FUNC_TRACE(); 221 222 hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL; 223 for (i = 0; i < VMXNET3_MAX_INTRS; i++) 224 VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1); 225 } 226 227 /* 228 * It returns 0 on success. 229 */ 230 static int 231 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) 232 { 233 struct rte_pci_device *pci_dev; 234 struct vmxnet3_hw *hw = eth_dev->data->dev_private; 235 uint32_t mac_hi, mac_lo, ver; 236 237 PMD_INIT_FUNC_TRACE(); 238 239 eth_dev->dev_ops = &vmxnet3_eth_dev_ops; 240 eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts; 241 eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts; 242 eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts; 243 pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 244 245 /* 246 * for secondary processes, we don't initialize any further as primary 247 * has already done this work. 248 */ 249 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 250 return 0; 251 252 rte_eth_copy_pci_info(eth_dev, pci_dev); 253 eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; 254 255 /* Vendor and Device ID need to be set before init of shared code */ 256 hw->device_id = pci_dev->id.device_id; 257 hw->vendor_id = pci_dev->id.vendor_id; 258 hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; 259 hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; 260 261 hw->num_rx_queues = 1; 262 hw->num_tx_queues = 1; 263 hw->bufs_per_pkt = 1; 264 265 /* Check h/w version compatibility with driver. */ 266 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS); 267 PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver); 268 if (ver & 0x1) 269 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1); 270 else { 271 PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1"); 272 return -EIO; 273 } 274 275 /* Check UPT version compatibility with driver. */ 276 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS); 277 PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver); 278 if (ver & 0x1) 279 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1); 280 else { 281 PMD_INIT_LOG(ERR, "Incompatible UPT version."); 282 return -EIO; 283 } 284 285 /* Getting MAC Address */ 286 mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL); 287 mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH); 288 memcpy(hw->perm_addr, &mac_lo, 4); 289 memcpy(hw->perm_addr + 4, &mac_hi, 2); 290 291 /* Allocate memory for storing MAC addresses */ 292 eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN * 293 VMXNET3_MAX_MAC_ADDRS, 0); 294 if (eth_dev->data->mac_addrs == NULL) { 295 PMD_INIT_LOG(ERR, 296 "Failed to allocate %d bytes needed to store MAC addresses", 297 ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS); 298 return -ENOMEM; 299 } 300 /* Copy the permanent MAC address */ 301 ether_addr_copy((struct ether_addr *) hw->perm_addr, 302 ð_dev->data->mac_addrs[0]); 303 304 PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", 305 hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2], 306 hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]); 307 308 /* Put device in Quiesce Mode */ 309 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); 310 311 /* allow untagged pkts */ 312 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0); 313 314 return 0; 315 } 316 317 static int 318 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev) 319 { 320 struct vmxnet3_hw *hw = eth_dev->data->dev_private; 321 322 PMD_INIT_FUNC_TRACE(); 323 324 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 325 return 0; 326 327 if (hw->adapter_stopped == 0) 328 vmxnet3_dev_close(eth_dev); 329 330 eth_dev->dev_ops = NULL; 331 eth_dev->rx_pkt_burst = NULL; 332 eth_dev->tx_pkt_burst = NULL; 333 eth_dev->tx_pkt_prepare = NULL; 334 335 rte_free(eth_dev->data->mac_addrs); 336 eth_dev->data->mac_addrs = NULL; 337 338 return 0; 339 } 340 341 static struct eth_driver rte_vmxnet3_pmd = { 342 .pci_drv = { 343 .id_table = pci_id_vmxnet3_map, 344 .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 345 .probe = rte_eth_dev_pci_probe, 346 .remove = rte_eth_dev_pci_remove, 347 }, 348 .eth_dev_init = eth_vmxnet3_dev_init, 349 .eth_dev_uninit = eth_vmxnet3_dev_uninit, 350 .dev_private_size = sizeof(struct vmxnet3_hw), 351 }; 352 353 static int 354 vmxnet3_dev_configure(struct rte_eth_dev *dev) 355 { 356 const struct rte_memzone *mz; 357 struct vmxnet3_hw *hw = dev->data->dev_private; 358 size_t size; 359 360 PMD_INIT_FUNC_TRACE(); 361 362 if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES || 363 dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) { 364 PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported"); 365 return -EINVAL; 366 } 367 368 if (!rte_is_power_of_2(dev->data->nb_rx_queues)) { 369 PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2"); 370 return -EINVAL; 371 } 372 373 size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + 374 dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc); 375 376 if (size > UINT16_MAX) 377 return -EINVAL; 378 379 hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues; 380 hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues; 381 382 /* 383 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead 384 * on current socket 385 */ 386 mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared), 387 "shared", rte_socket_id(), 8, 1); 388 389 if (mz == NULL) { 390 PMD_INIT_LOG(ERR, "ERROR: Creating shared zone"); 391 return -ENOMEM; 392 } 393 memset(mz->addr, 0, mz->len); 394 395 hw->shared = mz->addr; 396 hw->sharedPA = mz->phys_addr; 397 398 /* 399 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc 400 * on current socket. 401 * 402 * We cannot reuse this memzone from previous allocation as its size 403 * depends on the number of tx and rx queues, which could be different 404 * from one config to another. 405 */ 406 mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(), 407 VMXNET3_QUEUE_DESC_ALIGN, 0); 408 if (mz == NULL) { 409 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone"); 410 return -ENOMEM; 411 } 412 memset(mz->addr, 0, mz->len); 413 414 hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr; 415 hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues); 416 417 hw->queueDescPA = mz->phys_addr; 418 hw->queue_desc_len = (uint16_t)size; 419 420 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 421 /* Allocate memory structure for UPT1_RSSConf and configure */ 422 mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), 423 "rss_conf", rte_socket_id(), 424 RTE_CACHE_LINE_SIZE, 1); 425 if (mz == NULL) { 426 PMD_INIT_LOG(ERR, 427 "ERROR: Creating rss_conf structure zone"); 428 return -ENOMEM; 429 } 430 memset(mz->addr, 0, mz->len); 431 432 hw->rss_conf = mz->addr; 433 hw->rss_confPA = mz->phys_addr; 434 } 435 436 return 0; 437 } 438 439 static void 440 vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr) 441 { 442 uint32_t val; 443 444 PMD_INIT_LOG(DEBUG, 445 "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", 446 addr[0], addr[1], addr[2], 447 addr[3], addr[4], addr[5]); 448 449 val = *(const uint32_t *)addr; 450 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val); 451 452 val = (addr[5] << 8) | addr[4]; 453 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val); 454 } 455 456 static int 457 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) 458 { 459 struct rte_eth_conf port_conf = dev->data->dev_conf; 460 struct vmxnet3_hw *hw = dev->data->dev_private; 461 uint32_t mtu = dev->data->mtu; 462 Vmxnet3_DriverShared *shared = hw->shared; 463 Vmxnet3_DSDevRead *devRead = &shared->devRead; 464 uint32_t i; 465 int ret; 466 467 shared->magic = VMXNET3_REV1_MAGIC; 468 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 469 470 /* Setting up Guest OS information */ 471 devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ? 472 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64; 473 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 474 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 475 devRead->misc.driverInfo.uptVerSpt = 1; 476 477 devRead->misc.mtu = rte_le_to_cpu_32(mtu); 478 devRead->misc.queueDescPA = hw->queueDescPA; 479 devRead->misc.queueDescLen = hw->queue_desc_len; 480 devRead->misc.numTxQueues = hw->num_tx_queues; 481 devRead->misc.numRxQueues = hw->num_rx_queues; 482 483 /* 484 * Set number of interrupts to 1 485 * PMD disables all the interrupts but this is MUST to activate device 486 * It needs at least one interrupt for link events to handle 487 * So we'll disable it later after device activation if needed 488 */ 489 devRead->intrConf.numIntrs = 1; 490 devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL; 491 492 for (i = 0; i < hw->num_tx_queues; i++) { 493 Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i]; 494 vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i]; 495 496 tqd->ctrl.txNumDeferred = 0; 497 tqd->ctrl.txThreshold = 1; 498 tqd->conf.txRingBasePA = txq->cmd_ring.basePA; 499 tqd->conf.compRingBasePA = txq->comp_ring.basePA; 500 tqd->conf.dataRingBasePA = txq->data_ring.basePA; 501 502 tqd->conf.txRingSize = txq->cmd_ring.size; 503 tqd->conf.compRingSize = txq->comp_ring.size; 504 tqd->conf.dataRingSize = txq->data_ring.size; 505 tqd->conf.intrIdx = txq->comp_ring.intr_idx; 506 tqd->status.stopped = TRUE; 507 tqd->status.error = 0; 508 memset(&tqd->stats, 0, sizeof(tqd->stats)); 509 } 510 511 for (i = 0; i < hw->num_rx_queues; i++) { 512 Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i]; 513 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; 514 515 rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA; 516 rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA; 517 rqd->conf.compRingBasePA = rxq->comp_ring.basePA; 518 519 rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size; 520 rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size; 521 rqd->conf.compRingSize = rxq->comp_ring.size; 522 rqd->conf.intrIdx = rxq->comp_ring.intr_idx; 523 rqd->status.stopped = TRUE; 524 rqd->status.error = 0; 525 memset(&rqd->stats, 0, sizeof(rqd->stats)); 526 } 527 528 /* RxMode set to 0 of VMXNET3_RXM_xxx */ 529 devRead->rxFilterConf.rxMode = 0; 530 531 /* Setting up feature flags */ 532 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 533 devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM; 534 535 if (dev->data->dev_conf.rxmode.enable_lro) { 536 devRead->misc.uptFeatures |= VMXNET3_F_LRO; 537 devRead->misc.maxNumRxSG = 0; 538 } 539 540 if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 541 ret = vmxnet3_rss_configure(dev); 542 if (ret != VMXNET3_SUCCESS) 543 return ret; 544 545 devRead->misc.uptFeatures |= VMXNET3_F_RSS; 546 devRead->rssConfDesc.confVer = 1; 547 devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf); 548 devRead->rssConfDesc.confPA = hw->rss_confPA; 549 } 550 551 vmxnet3_dev_vlan_offload_set(dev, 552 ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK); 553 554 vmxnet3_write_mac(hw, hw->perm_addr); 555 556 return VMXNET3_SUCCESS; 557 } 558 559 /* 560 * Configure device link speed and setup link. 561 * Must be called after eth_vmxnet3_dev_init. Other wise it might fail 562 * It returns 0 on success. 563 */ 564 static int 565 vmxnet3_dev_start(struct rte_eth_dev *dev) 566 { 567 int ret; 568 struct vmxnet3_hw *hw = dev->data->dev_private; 569 570 PMD_INIT_FUNC_TRACE(); 571 572 ret = vmxnet3_setup_driver_shared(dev); 573 if (ret != VMXNET3_SUCCESS) 574 return ret; 575 576 /* Exchange shared data with device */ 577 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 578 VMXNET3_GET_ADDR_LO(hw->sharedPA)); 579 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 580 VMXNET3_GET_ADDR_HI(hw->sharedPA)); 581 582 /* Activate device by register write */ 583 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV); 584 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); 585 586 if (ret != 0) { 587 PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL"); 588 return -EINVAL; 589 } 590 591 /* Disable interrupts */ 592 vmxnet3_disable_intr(hw); 593 594 /* 595 * Load RX queues with blank mbufs and update next2fill index for device 596 * Update RxMode of the device 597 */ 598 ret = vmxnet3_dev_rxtx_init(dev); 599 if (ret != VMXNET3_SUCCESS) { 600 PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL"); 601 return ret; 602 } 603 604 /* Setting proper Rx Mode and issue Rx Mode Update command */ 605 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1); 606 607 /* 608 * Don't need to handle events for now 609 */ 610 #if PROCESS_SYS_EVENTS == 1 611 events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR); 612 PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events); 613 vmxnet3_process_events(hw); 614 #endif 615 return VMXNET3_SUCCESS; 616 } 617 618 /* 619 * Stop device: disable rx and tx functions to allow for reconfiguring. 620 */ 621 static void 622 vmxnet3_dev_stop(struct rte_eth_dev *dev) 623 { 624 struct rte_eth_link link; 625 struct vmxnet3_hw *hw = dev->data->dev_private; 626 627 PMD_INIT_FUNC_TRACE(); 628 629 if (hw->adapter_stopped == 1) { 630 PMD_INIT_LOG(DEBUG, "Device already closed."); 631 return; 632 } 633 634 /* disable interrupts */ 635 vmxnet3_disable_intr(hw); 636 637 /* quiesce the device first */ 638 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); 639 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0); 640 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0); 641 642 /* reset the device */ 643 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); 644 PMD_INIT_LOG(DEBUG, "Device reset."); 645 hw->adapter_stopped = 0; 646 647 vmxnet3_dev_clear_queues(dev); 648 649 /* Clear recorded link status */ 650 memset(&link, 0, sizeof(link)); 651 vmxnet3_dev_atomic_write_link_status(dev, &link); 652 } 653 654 /* 655 * Reset and stop device. 656 */ 657 static void 658 vmxnet3_dev_close(struct rte_eth_dev *dev) 659 { 660 struct vmxnet3_hw *hw = dev->data->dev_private; 661 662 PMD_INIT_FUNC_TRACE(); 663 664 vmxnet3_dev_stop(dev); 665 hw->adapter_stopped = 1; 666 } 667 668 static void 669 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 670 { 671 unsigned int i; 672 struct vmxnet3_hw *hw = dev->data->dev_private; 673 674 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 675 676 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES); 677 for (i = 0; i < hw->num_tx_queues; i++) { 678 struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats; 679 680 stats->q_opackets[i] = txStats->ucastPktsTxOK + 681 txStats->mcastPktsTxOK + 682 txStats->bcastPktsTxOK; 683 stats->q_obytes[i] = txStats->ucastBytesTxOK + 684 txStats->mcastBytesTxOK + 685 txStats->bcastBytesTxOK; 686 687 stats->opackets += stats->q_opackets[i]; 688 stats->obytes += stats->q_obytes[i]; 689 stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard; 690 } 691 692 RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES); 693 for (i = 0; i < hw->num_rx_queues; i++) { 694 struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats; 695 696 stats->q_ipackets[i] = rxStats->ucastPktsRxOK + 697 rxStats->mcastPktsRxOK + 698 rxStats->bcastPktsRxOK; 699 700 stats->q_ibytes[i] = rxStats->ucastBytesRxOK + 701 rxStats->mcastBytesRxOK + 702 rxStats->bcastBytesRxOK; 703 704 stats->ipackets += stats->q_ipackets[i]; 705 stats->ibytes += stats->q_ibytes[i]; 706 707 stats->q_errors[i] = rxStats->pktsRxError; 708 stats->ierrors += rxStats->pktsRxError; 709 stats->rx_nombuf += rxStats->pktsRxOutOfBuf; 710 } 711 } 712 713 static void 714 vmxnet3_dev_info_get(struct rte_eth_dev *dev, 715 struct rte_eth_dev_info *dev_info) 716 { 717 dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device); 718 719 dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES; 720 dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES; 721 dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM; 722 dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */ 723 dev_info->speed_capa = ETH_LINK_SPEED_10G; 724 dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS; 725 726 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP; 727 dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL; 728 729 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 730 .nb_max = VMXNET3_RX_RING_MAX_SIZE, 731 .nb_min = VMXNET3_DEF_RX_RING_SIZE, 732 .nb_align = 1, 733 }; 734 735 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 736 .nb_max = VMXNET3_TX_RING_MAX_SIZE, 737 .nb_min = VMXNET3_DEF_TX_RING_SIZE, 738 .nb_align = 1, 739 .nb_seg_max = VMXNET3_TX_MAX_SEG, 740 .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT, 741 }; 742 743 dev_info->rx_offload_capa = 744 DEV_RX_OFFLOAD_VLAN_STRIP | 745 DEV_RX_OFFLOAD_UDP_CKSUM | 746 DEV_RX_OFFLOAD_TCP_CKSUM | 747 DEV_RX_OFFLOAD_TCP_LRO; 748 749 dev_info->tx_offload_capa = 750 DEV_TX_OFFLOAD_VLAN_INSERT | 751 DEV_TX_OFFLOAD_TCP_CKSUM | 752 DEV_TX_OFFLOAD_UDP_CKSUM | 753 DEV_TX_OFFLOAD_TCP_TSO; 754 } 755 756 static const uint32_t * 757 vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev) 758 { 759 static const uint32_t ptypes[] = { 760 RTE_PTYPE_L3_IPV4_EXT, 761 RTE_PTYPE_L3_IPV4, 762 RTE_PTYPE_UNKNOWN 763 }; 764 765 if (dev->rx_pkt_burst == vmxnet3_recv_pkts) 766 return ptypes; 767 return NULL; 768 } 769 770 static void 771 vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr) 772 { 773 struct vmxnet3_hw *hw = dev->data->dev_private; 774 775 vmxnet3_write_mac(hw, mac_addr->addr_bytes); 776 } 777 778 /* return 0 means link status changed, -1 means not changed */ 779 static int 780 vmxnet3_dev_link_update(struct rte_eth_dev *dev, 781 __rte_unused int wait_to_complete) 782 { 783 struct vmxnet3_hw *hw = dev->data->dev_private; 784 struct rte_eth_link old, link; 785 uint32_t ret; 786 787 /* Link status doesn't change for stopped dev */ 788 if (dev->data->dev_started == 0) 789 return -1; 790 791 memset(&link, 0, sizeof(link)); 792 vmxnet3_dev_atomic_read_link_status(dev, &old); 793 794 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 795 ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD); 796 797 if (ret & 0x1) { 798 link.link_status = ETH_LINK_UP; 799 link.link_duplex = ETH_LINK_FULL_DUPLEX; 800 link.link_speed = ETH_SPEED_NUM_10G; 801 link.link_autoneg = ETH_LINK_SPEED_FIXED; 802 } 803 804 vmxnet3_dev_atomic_write_link_status(dev, &link); 805 806 return (old.link_status == link.link_status) ? -1 : 0; 807 } 808 809 /* Updating rxmode through Vmxnet3_DriverShared structure in adapter */ 810 static void 811 vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) 812 { 813 struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf; 814 815 if (set) 816 rxConf->rxMode = rxConf->rxMode | feature; 817 else 818 rxConf->rxMode = rxConf->rxMode & (~feature); 819 820 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE); 821 } 822 823 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */ 824 static void 825 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev) 826 { 827 struct vmxnet3_hw *hw = dev->data->dev_private; 828 uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; 829 830 memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE); 831 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1); 832 833 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 834 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 835 } 836 837 /* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */ 838 static void 839 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev) 840 { 841 struct vmxnet3_hw *hw = dev->data->dev_private; 842 uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable; 843 844 memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); 845 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0); 846 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 847 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 848 } 849 850 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */ 851 static void 852 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev) 853 { 854 struct vmxnet3_hw *hw = dev->data->dev_private; 855 856 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1); 857 } 858 859 /* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */ 860 static void 861 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev) 862 { 863 struct vmxnet3_hw *hw = dev->data->dev_private; 864 865 vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0); 866 } 867 868 /* Enable/disable filter on vlan */ 869 static int 870 vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on) 871 { 872 struct vmxnet3_hw *hw = dev->data->dev_private; 873 struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf; 874 uint32_t *vf_table = rxConf->vfTable; 875 876 /* save state for restore */ 877 if (on) 878 VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid); 879 else 880 VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid); 881 882 /* don't change active filter if in promiscuous mode */ 883 if (rxConf->rxMode & VMXNET3_RXM_PROMISC) 884 return 0; 885 886 /* set in hardware */ 887 if (on) 888 VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid); 889 else 890 VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid); 891 892 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 893 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 894 return 0; 895 } 896 897 static void 898 vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask) 899 { 900 struct vmxnet3_hw *hw = dev->data->dev_private; 901 Vmxnet3_DSDevRead *devRead = &hw->shared->devRead; 902 uint32_t *vf_table = devRead->rxFilterConf.vfTable; 903 904 if (mask & ETH_VLAN_STRIP_MASK) { 905 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 906 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 907 else 908 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; 909 910 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 911 VMXNET3_CMD_UPDATE_FEATURE); 912 } 913 914 if (mask & ETH_VLAN_FILTER_MASK) { 915 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 916 memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE); 917 else 918 memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE); 919 920 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 921 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 922 } 923 } 924 925 #if PROCESS_SYS_EVENTS == 1 926 static void 927 vmxnet3_process_events(struct vmxnet3_hw *hw) 928 { 929 uint32_t events = hw->shared->ecr; 930 931 if (!events) { 932 PMD_INIT_LOG(ERR, "No events to process"); 933 return; 934 } 935 936 /* 937 * ECR bits when written with 1b are cleared. Hence write 938 * events back to ECR so that the bits which were set will be reset. 939 */ 940 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events); 941 942 /* Check if link state has changed */ 943 if (events & VMXNET3_ECR_LINK) 944 PMD_INIT_LOG(ERR, 945 "Process events in %s(): VMXNET3_ECR_LINK event", 946 __func__); 947 948 /* Check if there is an error on xmit/recv queues */ 949 if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { 950 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, 951 VMXNET3_CMD_GET_QUEUE_STATUS); 952 953 if (hw->tqd_start->status.stopped) 954 PMD_INIT_LOG(ERR, "tq error 0x%x", 955 hw->tqd_start->status.error); 956 957 if (hw->rqd_start->status.stopped) 958 PMD_INIT_LOG(ERR, "rq error 0x%x", 959 hw->rqd_start->status.error); 960 961 /* Reset the device */ 962 /* Have to reset the device */ 963 } 964 965 if (events & VMXNET3_ECR_DIC) 966 PMD_INIT_LOG(ERR, "Device implementation change event."); 967 968 if (events & VMXNET3_ECR_DEBUG) 969 PMD_INIT_LOG(ERR, "Debug event generated by device."); 970 } 971 #endif 972 973 RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv); 974 RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map); 975 RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio"); 976