1 /*- 2 * BSD LICENSE 3 * 4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <fcntl.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "base/vmxnet3_defs.h"

#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

/* Compile-time switch: when set to 1, the driver reads and logs device
 * events (ECR register) at start time via vmxnet3_process_events(). */
#define PROCESS_SYS_EVENTS 0

static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				  struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static void vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
					       int mask, int clear);

#if PROCESS_SYS_EVENTS == 1
static void vmxnet3_process_events(struct vmxnet3_hw *);
#endif
/*
 * The set of PCI devices this driver supports.
 * The RTE_PCI_DEV_ID_DECL_VMXNET3 macro is expanded by including
 * rte_pci_dev_ids.h, which emits one {vendor, device} entry per
 * supported vmxnet3 PCI ID; the zeroed entry terminates the table.
 */
static const struct rte_pci_id pci_id_vmxnet3_map[] = {

#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};

/* ethdev callback table wired into each vmxnet3 port at init time. */
static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure        = vmxnet3_dev_configure,
	.dev_start            = vmxnet3_dev_start,
	.dev_stop             = vmxnet3_dev_stop,
	.dev_close            = vmxnet3_dev_close,
	.promiscuous_enable   = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable  = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable  = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update          = vmxnet3_dev_link_update,
	.stats_get            = vmxnet3_dev_stats_get,
	.dev_infos_get        = vmxnet3_dev_info_get,
	.vlan_filter_set      = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set     = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup       = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release     = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup       = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release     = vmxnet3_dev_tx_queue_release,
};

/*
 * Reserve (or find) a memzone for a DMA-able shared structure.
 * The zone name is "<driver>_<port>_<post_string>" so each port gets
 * its own zone; an existing zone with that name is reused, which makes
 * repeated configure calls idempotent with respect to memory.
 */
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id, uint16_t align)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%d_%s",
		 dev->driver->pci_drv.name, dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	/* Not found: reserve a fresh zone with the requested alignment. */
	return rte_memzone_reserve_aligned(z_name, size,
					   socket_id, 0, align);
}

/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 * @param link
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	/* 64-bit compare-and-set against the destination's own current
	 * value: the swap only happens if nobody raced us, which copies
	 * *src into *dst atomically with respect to other writers. */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   - Pointer to the buffer holding the link status to publish.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/*
 * This function is based on vmxnet3_disable_intr().
 * Sets the "disable all" bit in the shared interrupt-control word and
 * masks every interrupt via the per-vector IMR registers in BAR0.
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	/* IMR registers are spaced 8 bytes apart; writing 1 masks a vector. */
	for (i = 0; i < VMXNET3_MAX_INTRS; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}

/*
 * It returns 0 on success.
217 */ 218 static int 219 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev) 220 { 221 struct rte_pci_device *pci_dev; 222 struct vmxnet3_hw *hw = eth_dev->data->dev_private; 223 uint32_t mac_hi, mac_lo, ver; 224 225 PMD_INIT_FUNC_TRACE(); 226 227 eth_dev->dev_ops = &vmxnet3_eth_dev_ops; 228 eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts; 229 eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts; 230 pci_dev = eth_dev->pci_dev; 231 232 /* 233 * for secondary processes, we don't initialize any further as primary 234 * has already done this work. 235 */ 236 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 237 return 0; 238 239 rte_eth_copy_pci_info(eth_dev, pci_dev); 240 241 /* Vendor and Device ID need to be set before init of shared code */ 242 hw->device_id = pci_dev->id.device_id; 243 hw->vendor_id = pci_dev->id.vendor_id; 244 hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr; 245 hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr; 246 247 hw->num_rx_queues = 1; 248 hw->num_tx_queues = 1; 249 hw->bufs_per_pkt = 1; 250 251 /* Check h/w version compatibility with driver. */ 252 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS); 253 PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver); 254 if (ver & 0x1) 255 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1); 256 else { 257 PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1"); 258 return -EIO; 259 } 260 261 /* Check UPT version compatibility with driver. 
*/ 262 ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS); 263 PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver); 264 if (ver & 0x1) 265 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1); 266 else { 267 PMD_INIT_LOG(ERR, "Incompatible UPT version."); 268 return -EIO; 269 } 270 271 /* Getting MAC Address */ 272 mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL); 273 mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH); 274 memcpy(hw->perm_addr , &mac_lo, 4); 275 memcpy(hw->perm_addr+4, &mac_hi, 2); 276 277 /* Allocate memory for storing MAC addresses */ 278 eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN * 279 VMXNET3_MAX_MAC_ADDRS, 0); 280 if (eth_dev->data->mac_addrs == NULL) { 281 PMD_INIT_LOG(ERR, 282 "Failed to allocate %d bytes needed to store MAC addresses", 283 ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS); 284 return -ENOMEM; 285 } 286 /* Copy the permanent MAC address */ 287 ether_addr_copy((struct ether_addr *) hw->perm_addr, 288 ð_dev->data->mac_addrs[0]); 289 290 PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", 291 hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2], 292 hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]); 293 294 /* Put device in Quiesce Mode */ 295 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV); 296 297 return 0; 298 } 299 300 static int 301 eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev) 302 { 303 struct vmxnet3_hw *hw = eth_dev->data->dev_private; 304 305 PMD_INIT_FUNC_TRACE(); 306 307 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 308 return 0; 309 310 if (hw->adapter_stopped == 0) 311 vmxnet3_dev_close(eth_dev); 312 313 eth_dev->dev_ops = NULL; 314 eth_dev->rx_pkt_burst = NULL; 315 eth_dev->tx_pkt_burst = NULL; 316 317 rte_free(eth_dev->data->mac_addrs); 318 eth_dev->data->mac_addrs = NULL; 319 320 return 0; 321 } 322 323 static struct eth_driver rte_vmxnet3_pmd = { 324 .pci_drv = { 325 .name = "rte_vmxnet3_pmd", 326 .id_table = pci_id_vmxnet3_map, 327 .drv_flags = 
RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, 328 }, 329 .eth_dev_init = eth_vmxnet3_dev_init, 330 .eth_dev_uninit = eth_vmxnet3_dev_uninit, 331 .dev_private_size = sizeof(struct vmxnet3_hw), 332 }; 333 334 /* 335 * Driver initialization routine. 336 * Invoked once at EAL init time. 337 * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices. 338 */ 339 static int 340 rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused) 341 { 342 PMD_INIT_FUNC_TRACE(); 343 344 rte_eth_driver_register(&rte_vmxnet3_pmd); 345 return 0; 346 } 347 348 static int 349 vmxnet3_dev_configure(struct rte_eth_dev *dev) 350 { 351 const struct rte_memzone *mz; 352 struct vmxnet3_hw *hw = dev->data->dev_private; 353 size_t size; 354 355 PMD_INIT_FUNC_TRACE(); 356 357 if (dev->data->nb_rx_queues > UINT8_MAX || 358 dev->data->nb_tx_queues > UINT8_MAX) 359 return -EINVAL; 360 361 size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) + 362 dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc); 363 364 if (size > UINT16_MAX) 365 return -EINVAL; 366 367 hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues; 368 hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues; 369 370 /* 371 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead 372 * on current socket 373 */ 374 mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared), 375 "shared", rte_socket_id(), 8); 376 377 if (mz == NULL) { 378 PMD_INIT_LOG(ERR, "ERROR: Creating shared zone"); 379 return -ENOMEM; 380 } 381 memset(mz->addr, 0, mz->len); 382 383 hw->shared = mz->addr; 384 hw->sharedPA = mz->phys_addr; 385 386 /* 387 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc 388 * on current socket 389 */ 390 mz = gpa_zone_reserve(dev, size, "queuedesc", 391 rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN); 392 if (mz == NULL) { 393 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone"); 394 return -ENOMEM; 395 } 396 memset(mz->addr, 0, mz->len); 
397 398 hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr; 399 hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues); 400 401 hw->queueDescPA = mz->phys_addr; 402 hw->queue_desc_len = (uint16_t)size; 403 404 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 405 406 /* Allocate memory structure for UPT1_RSSConf and configure */ 407 mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf", 408 rte_socket_id(), RTE_CACHE_LINE_SIZE); 409 if (mz == NULL) { 410 PMD_INIT_LOG(ERR, 411 "ERROR: Creating rss_conf structure zone"); 412 return -ENOMEM; 413 } 414 memset(mz->addr, 0, mz->len); 415 416 hw->rss_conf = mz->addr; 417 hw->rss_confPA = mz->phys_addr; 418 } 419 420 return 0; 421 } 422 423 static int 424 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) 425 { 426 struct rte_eth_conf port_conf = dev->data->dev_conf; 427 struct vmxnet3_hw *hw = dev->data->dev_private; 428 Vmxnet3_DriverShared *shared = hw->shared; 429 Vmxnet3_DSDevRead *devRead = &shared->devRead; 430 uint32_t *mac_ptr; 431 uint32_t val, i; 432 int ret, mask; 433 434 shared->magic = VMXNET3_REV1_MAGIC; 435 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 436 437 /* Setting up Guest OS information */ 438 devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ? 
439 VMXNET3_GOS_BITS_32 : 440 VMXNET3_GOS_BITS_64; 441 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 442 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 443 devRead->misc.driverInfo.uptVerSpt = 1; 444 445 devRead->misc.mtu = rte_le_to_cpu_32(dev->data->mtu); 446 devRead->misc.queueDescPA = hw->queueDescPA; 447 devRead->misc.queueDescLen = hw->queue_desc_len; 448 devRead->misc.numTxQueues = hw->num_tx_queues; 449 devRead->misc.numRxQueues = hw->num_rx_queues; 450 451 /* 452 * Set number of interrupts to 1 453 * PMD disables all the interrupts but this is MUST to activate device 454 * It needs at least one interrupt for link events to handle 455 * So we'll disable it later after device activation if needed 456 */ 457 devRead->intrConf.numIntrs = 1; 458 devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL; 459 460 for (i = 0; i < hw->num_tx_queues; i++) { 461 Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i]; 462 vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i]; 463 464 tqd->ctrl.txNumDeferred = 0; 465 tqd->ctrl.txThreshold = 1; 466 tqd->conf.txRingBasePA = txq->cmd_ring.basePA; 467 tqd->conf.compRingBasePA = txq->comp_ring.basePA; 468 tqd->conf.dataRingBasePA = txq->data_ring.basePA; 469 470 tqd->conf.txRingSize = txq->cmd_ring.size; 471 tqd->conf.compRingSize = txq->comp_ring.size; 472 tqd->conf.dataRingSize = txq->data_ring.size; 473 tqd->conf.intrIdx = txq->comp_ring.intr_idx; 474 tqd->status.stopped = TRUE; 475 tqd->status.error = 0; 476 memset(&tqd->stats, 0, sizeof(tqd->stats)); 477 } 478 479 for (i = 0; i < hw->num_rx_queues; i++) { 480 Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i]; 481 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; 482 483 rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA; 484 rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA; 485 rqd->conf.compRingBasePA = rxq->comp_ring.basePA; 486 487 rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size; 488 rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size; 489 rqd->conf.compRingSize = 
rxq->comp_ring.size; 490 rqd->conf.intrIdx = rxq->comp_ring.intr_idx; 491 rqd->status.stopped = TRUE; 492 rqd->status.error = 0; 493 memset(&rqd->stats, 0, sizeof(rqd->stats)); 494 } 495 496 /* RxMode set to 0 of VMXNET3_RXM_xxx */ 497 devRead->rxFilterConf.rxMode = 0; 498 499 /* Setting up feature flags */ 500 if (dev->data->dev_conf.rxmode.hw_ip_checksum) 501 devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM; 502 503 if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) { 504 ret = vmxnet3_rss_configure(dev); 505 if (ret != VMXNET3_SUCCESS) 506 return ret; 507 508 devRead->misc.uptFeatures |= VMXNET3_F_RSS; 509 devRead->rssConfDesc.confVer = 1; 510 devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf); 511 devRead->rssConfDesc.confPA = hw->rss_confPA; 512 } 513 514 mask = 0; 515 if (dev->data->dev_conf.rxmode.hw_vlan_strip) 516 mask |= ETH_VLAN_STRIP_MASK; 517 518 if (dev->data->dev_conf.rxmode.hw_vlan_filter) 519 mask |= ETH_VLAN_FILTER_MASK; 520 521 vmxnet3_dev_vlan_offload_set_clear(dev, mask, 1); 522 523 PMD_INIT_LOG(DEBUG, 524 "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x", 525 hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2], 526 hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]); 527 528 /* Write MAC Address back to device */ 529 mac_ptr = (uint32_t *)hw->perm_addr; 530 val = *mac_ptr; 531 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val); 532 533 val = (hw->perm_addr[5] << 8) | hw->perm_addr[4]; 534 VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val); 535 536 return VMXNET3_SUCCESS; 537 } 538 539 /* 540 * Configure device link speed and setup link. 541 * Must be called after eth_vmxnet3_dev_init. Other wise it might fail 542 * It returns 0 on success. 
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int status, ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Populate the Vmxnet3_DriverShared area before telling the device
	 * where it lives. */
	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write; reading CMD back returns the
	 * activation status (0 = success). */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (status != 0) {
		PMD_INIT_LOG(ERR, "Device activation in %s(): UNSUCCESSFUL", __func__);
		return -1;
	}

	/* Disable interrupts: this PMD polls, so the single interrupt
	 * required for activation is masked again here. */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL", __func__);
		return ret;
	}

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	/*
	 * Don't need to handle events for now
	 * NOTE(review): if PROCESS_SYS_EVENTS is ever set to 1, the block
	 * below will not compile as-is — `events` is never declared.
	 */
#if PROCESS_SYS_EVENTS == 1
	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
	PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
	vmxnet3_process_events(hw);
#endif
	/* status is 0 here (checked above). */
	return status;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already closed.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	/* quiesce the device first, then clear the shared-area address so
	 * the device no longer references driver memory */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");
	/* NOTE(review): stop leaves adapter_stopped at 0 (only dev_close
	 * sets it to 1), so stop can be re-entered after a restart; the
	 * guard above therefore only fires after close — confirm this is
	 * the intended state machine. */
	hw->adapter_stopped = 0;

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_write_link_status(dev, &link);
}

/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	/* Mark fully closed; blocks a second stop via the guard above. */
	hw->adapter_stopped = 1;
}

/*
 * Read per-queue Tx/Rx statistics from the shared queue descriptors
 * (refreshed by the GET_STATS command) and aggregate them into the
 * generic rte_eth_stats counters.
 */
static void
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	/* Ask the device to refresh the stats blocks in shared memory. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	/* Per-queue arrays in rte_eth_stats must be large enough. */
	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;

		stats->q_opackets[i] = txStats->ucastPktsTxOK +
			txStats->mcastPktsTxOK +
			txStats->bcastPktsTxOK;
		stats->q_obytes[i] = txStats->ucastBytesTxOK +
			txStats->mcastBytesTxOK +
			txStats->bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats->pktsTxError +
			txStats->pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;

		stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
			rxStats->mcastPktsRxOK +
			rxStats->bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
			rxStats->mcastBytesRxOK +
			rxStats->bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats->pktsRxError;
		stats->ierrors += rxStats->pktsRxError;
		stats->imcasts += rxStats->mcastPktsRxOK;
		stats->rx_nombuf += rxStats->pktsRxOutOfBuf;
	}
}

/*
 * Report static device capabilities/limits; nothing is read from the
 * hardware, so the dev argument is unused.
 */
static void
vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	/* Tx path supports neither multi-segment mbufs nor offloads. */
	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
		ETH_TXQ_FLAGS_NOOFFLOADS;
	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
		.nb_align = 1,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_align = 1,
	};
}

/* return 0 means link status changed, -1 means not changed */
static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link old, link;
	uint32_t ret;

	if (dev->data->dev_started == 0)
		return -1; /* Link status doesn't change for stopped dev */

	memset(&link, 0, sizeof(link));
	vmxnet3_dev_atomic_read_link_status(dev, &old);

	/* GET_LINK: reading CMD back returns link state; bit 0 = link up. */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1) {
		/* vmxnet3 always reports 10 Gbps full duplex when up. */
		link.link_status = 1;
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
		link.link_speed = ETH_LINK_SPEED_10000;
	}

	vmxnet3_dev_atomic_write_link_status(dev, &link);

	return (old.link_status == link.link_status) ? -1 : 0;
}

/* Updating rxmode through Vmxnet3_DriverShared structure in adapter:
 * set or clear the given VMXNET3_RXM_* feature bits, then notify the
 * device with an UPDATE_RX_MODE command. */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {

	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

	if (set)
		rxConf->rxMode = rxConf->rxMode | feature;
	else
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	/* Clear the active VLAN filter table (shadow copy is kept so it
	 * can be restored when promiscuous mode is disabled). */
	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	/* Restore the VLAN filter table saved in shadow_vfta. */
	memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}

/* Enable/disable filter on vlan */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscious mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}

/*
 * Apply VLAN strip/filter offload settings. When `clear` is non-zero
 * the shadow VLAN table is reinitialized (used at device configure
 * time); otherwise the current shadow state is kept.
 */
static void
vmxnet3_dev_vlan_offload_set_clear(struct rte_eth_dev *dev,
				   int mask, int clear)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;

	if (mask & ETH_VLAN_STRIP_MASK)
		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
	else
		devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_FEATURE);

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (clear) {
			memset(hw->shadow_vfta, 0,
			       VMXNET3_VFT_TABLE_SIZE);
			/* allow untagged pkts */
			VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
		}
		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	} else {
		/* allow any pkts -- no filtering */
		if (clear)
			memset(hw->shadow_vfta, 0xff, VMXNET3_VFT_TABLE_SIZE);
		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
	}

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* ethdev vlan_offload_set hook: apply without clearing shadow state. */
static void
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	vmxnet3_dev_vlan_offload_set_clear(dev, mask, 0);
}

#if PROCESS_SYS_EVENTS == 1
/* Read, acknowledge, and log pending device events from the ECR. */
static void
vmxnet3_process_events(struct vmxnet3_hw *hw)
{
	uint32_t events = hw->shared->ecr;

	if (!events) {
		PMD_INIT_LOG(ERR, "No events to process in %s()", __func__);
		return;
	}

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK)
		PMD_INIT_LOG(ERR,
			     "Process events in %s(): VMXNET3_ECR_LINK event", __func__);

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "tq error 0x%x",
				     hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_INIT_LOG(ERR, "rq error 0x%x",
				     hw->rqd_start->status.error);

		/* Reset the device */
		/* Have to reset the device */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_INIT_LOG(ERR, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_INIT_LOG(ERR, "Debug event generated by device.");

}
#endif

/* EAL driver registration: rte_vmxnet3_pmd_init runs at EAL init. */
static struct rte_driver rte_vmxnet3_driver = {
	.type = PMD_PDEV,
	.init = rte_vmxnet3_pmd_init,
};

PMD_REGISTER_DRIVER(rte_vmxnet3_driver);