/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <fcntl.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "base/vmxnet3_defs.h"

#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define PROCESS_SYS_EVENTS 0

#define	VMXNET3_TX_MAX_SEG	UINT8_MAX

#define VMXNET3_TX_OFFLOAD_CAP		\
	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
	 DEV_TX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_TSO |	\
	 DEV_TX_OFFLOAD_MULTI_SEGS)

#define VMXNET3_RX_OFFLOAD_CAP		\
	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
	 DEV_RX_OFFLOAD_VLAN_FILTER |	\
	 DEV_RX_OFFLOAD_SCATTER |	\
	 DEV_RX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_LRO |	\
	 DEV_RX_OFFLOAD_JUMBO_FRAME)

static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				     int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static void vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
					struct rte_eth_xstat_name *xstats,
					unsigned int n);
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
				struct rte_ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);

int vmxnet3_logtype_init;
int vmxnet3_logtype_driver;
/*
 * The set of PCI devices this driver supports
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure = vmxnet3_dev_configure,
	.dev_start = vmxnet3_dev_start,
	.dev_stop = vmxnet3_dev_stop,
	.dev_close = vmxnet3_dev_close,
	.promiscuous_enable = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
	.link_update = vmxnet3_dev_link_update,
	.stats_get = vmxnet3_dev_stats_get,
	.xstats_get_names = vmxnet3_dev_xstats_get_names,
	.xstats_get = vmxnet3_dev_xstats_get,
	.stats_reset = vmxnet3_dev_stats_reset,
	.mac_addr_set = vmxnet3_mac_addr_set,
	.dev_infos_get = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release = vmxnet3_dev_tx_queue_release,
};

struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
};

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
};

static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id,
		 uint16_t align, bool reuse)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
		 dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (!reuse) {
		if (mz)
			rte_memzone_free(mz);
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
				RTE_MEMZONE_IOVA_CONTIG, align);
	}

	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

/*
 * This function is based on vmxnet3_disable_intr() in the Linux vmxnet3
 * driver.
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
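/*
 * Inverse of vmxnet3_disable_intr(): clear the global interrupt-disable
 * bit in the shared area and unmask every configured interrupt line.
 */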
static void
vmxnet3_enable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
}

/*
 * Gets tx data ring descriptor size.
 */
static uint16_t
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
{
	uint16 txdata_desc_size;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
}

/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;

	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	if (ver & (1 << VMXNET3_REV_4)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_4);
		hw->version = VMXNET3_REV_4 + 1;
	} else if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "Using device version %d", hw->version);
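	/*
	 * Note: VRRS and UVRS expose bitmasks of the revisions the device
	 * supports; the driver selects the highest revision it also supports
	 * and acknowledges the choice by writing that single bit back.
	 */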
	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", RTE_ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     RTE_ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}
	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->perm_addr,
			    &eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));

	/* clear snapshot stats */
	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));

	/* set the initial link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}

static int
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (hw->adapter_stopped == 0) {
		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
		return -EBUSY;
	}

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	return 0;
}

static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
}

static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
}
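/*
 * RTE_PCI_DRV_NEED_MAPPING asks the PCI bus driver to map the device BARs
 * before probing; RTE_PCI_DRV_INTR_LSC advertises link-state-change
 * interrupt support.
 */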
static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
};

static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
		return -EINVAL;
	}

	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
	       dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->iova;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket.
	 *
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->iova;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->iova;
	}

	return 0;
}

static void
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
{
	uint32_t val;

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     addr[0], addr[1], addr[2],
		     addr[3], addr[4], addr[5]);

	memcpy(&val, addr, 4);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	memcpy(&val, addr + 4, 2);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}
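/*
 * Describe the mempool memory backing the rx queues to the device via
 * VMXNET3_CMD_REGISTER_MEMREGS (issued from vmxnet3_dev_start()), so that
 * the backend can pre-map the guest buffer memory.
 */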
static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->iova;
	}

	num = hw->num_rx_queues;

	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}

static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	uint32_t i;
	int ret;

	hw->mtu = mtu;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;
	/*
	 * Set the number of interrupts to 1. The PMD disables all interrupts
	 * by default, but at least one interrupt must be configured to
	 * activate the device, and it is used to deliver link change events.
	 */
	hw->num_intrs = devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];

		txq->shared = &hw->tqd_start[i];

		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx = txq->comp_ring.intr_idx;
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		rxq->shared = &hw->rqd_start[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		}
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* Start with rxMode cleared: no VMXNET3_RXM_xxx bits set */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;
	}

	ret = vmxnet3_dev_vlan_offload_set(dev,
			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);

	return VMXNET3_SUCCESS;
}
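/*
 * Activation handshake: the driver publishes the physical address of the
 * Vmxnet3_DriverShared area through the DSAL/DSAH registers and then issues
 * VMXNET3_CMD_ACTIVATE_DEV; a non-zero readback of the command register
 * means activation failed.
 */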
/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init; otherwise it might fail.
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		/* Setup interrupt callback */
		rte_intr_callback_register(&pci_dev->intr_handle,
					   vmxnet3_interrupt_handler, dev);

		if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
			PMD_INIT_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -EINVAL;
	}

	/* Setup memory region for rx buffers */
	ret = vmxnet3_dev_setup_memreg(dev);
	if (ret == 0) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		if (ret != 0)
			PMD_INIT_LOG(DEBUG,
				     "Failed to register memory regions with device");
		ret = 0;
	} else {
		PMD_INIT_LOG(DEBUG, "Failed to set up memory regions");
	}

	if (VMXNET3_VERSION_GE_4(hw)) {
		/* Configure the additional RSS options of device version 4 */
		ret = vmxnet3_v4_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS) {
			PMD_INIT_LOG(ERR, "Failed to configure v4 RSS");
			return ret;
		}
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
		return ret;
	}

	hw->adapter_stopped = FALSE;

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	if (dev->data->dev_conf.intr_conf.lsc) {
		vmxnet3_enable_intr(hw);

		/*
		 * Update link state from device since this won't be
		 * done upon starting with lsc in use. This is done
		 * only after enabling interrupts to avoid any race
		 * where the link state could change without an
		 * interrupt being fired.
		 */
		__vmxnet3_dev_link_update(dev, 0);
	}

	return VMXNET3_SUCCESS;
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already stopped.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		rte_intr_disable(&pci_dev->intr_handle);

		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     vmxnet3_interrupt_handler, dev);
	}

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(dev, &link);

	hw->adapter_stopped = 1;
}

static void
vmxnet3_free_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		void *rxq = dev->data->rx_queues[i];

		vmxnet3_dev_rx_queue_release(rxq);
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		void *txq = dev->data->tx_queues[i];

		vmxnet3_dev_tx_queue_release(txq);
	}
	dev->data->nb_tx_queues = 0;
}

/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	vmxnet3_free_queues(dev);
}
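/*
 * Statistics accounting: the device clears its counters on
 * VMXNET3_CMD_ACTIVATE_DEV, so vmxnet3_hw_stats_save() latches the current
 * counters into saved_*_stats before each restart and the hw_*_stats_get()
 * helpers add them back in. snapshot_*_stats holds the totals recorded at
 * the last stats_reset, so the *_stats_get() helpers report values relative
 * to that reset.
 */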
static void
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_TxStats *res)
{
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
		((r)->f = (h)->tqd_start[(i)].stats.f +	\
			(h)->saved_tx_stats[(i)].f)

	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_UPDATE_TX_STAT
}

static void
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_RxStats *res)
{
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
		((r)->f = (h)->rqd_start[(i)].stats.f +	\
			(h)->saved_rx_stats[(i)].f)

	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_UPDATE_RX_STAT
}

static void
vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_TxStats *res)
{
	vmxnet3_hw_tx_stats_get(hw, q, res);

#define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)	\
		((r)->f -= (h)->snapshot_tx_stats[(i)].f)

	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
}

static void
vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_RxStats *res)
{
	vmxnet3_hw_rx_stats_get(hw, q, res);

#define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)	\
		((r)->f -= (h)->snapshot_rx_stats[(i)].f)

	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
}
static void
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
{
	unsigned int i;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
}

static int
vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
			     struct rte_eth_xstat_name *xstats_names,
			     unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (!xstats_names || n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!dev->data->rx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", i,
				 vmxnet3_rxq_stat_strings[t].name);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (!dev->data->tx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "tx_q%u_%s", i,
				 vmxnet3_txq_stat_strings[t].name);
			count++;
		}
	}

	return count;
}

static int
vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
				vmxnet3_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
				vmxnet3_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}
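/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the per-queue xstats exposed above are retrieved through the generic
 * ethdev API, e.g.:
 *
 *	int n = rte_eth_xstats_get(port_id, NULL, 0);
 *	struct rte_eth_xstat *xs = calloc(n, sizeof(*xs));
 *	struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));
 *
 *	rte_eth_xstats_get_names(port_id, names, n);
 *	rte_eth_xstats_get(port_id, xs, n);
 *	// names[i].name is e.g. "rx_q0_drop_total"; xs[i].value its counter
 */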
static int
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_tx_stats_get(hw, i, &txStats);

		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;

		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_rx_stats_get(hw, i, &rxStats);

		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		stats->imissed += rxStats.pktsRxOutOfBuf;
	}

	return 0;
}

static void
vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats = {0};
	struct UPT1_RxStats rxStats = {0};

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
		memcpy(&hw->snapshot_tx_stats[i], &txStats,
			sizeof(hw->snapshot_tx_stats[0]));
	}
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
			sizeof(hw->snapshot_rx_stats[0]));
	}
}

static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev,
		     struct rte_eth_dev_info *dev_info)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	if (VMXNET3_VERSION_GE_4(hw))
		dev_info->flow_type_rss_offloads |= VMXNET3_V4_RSS_MASK;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
		.nb_align = 1,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_align = 1,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
	};

	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
	dev_info->tx_queue_offload_capa = 0;
}

static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
		return ptypes;
	return NULL;
}

static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
	return 0;
}
/* return 0 means link status changed, -1 means not changed */
static int
__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			  __rte_unused int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t ret;

	memset(&link, 0, sizeof(link));

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1)
		link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;

	return rte_eth_linkstatus_set(dev, &link);
}

static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)
		return -1;

	return __vmxnet3_dev_link_update(dev, wait_to_complete);
}

/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
{
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

	if (set)
		rxConf->rxMode = rxConf->rxMode | feature;
	else
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	else
		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}
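/*
 * The driver keeps a shadow copy of the VLAN filter table (hw->shadow_vfta)
 * so the active table can be rebuilt when promiscuous mode is turned off or
 * when VLAN filtering is toggled via vmxnet3_dev_vlan_offload_set().
 */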
/* Enable/disable filter on vlan */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}

static int
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
		else
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		else
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	return 0;
}
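/*
 * Handle the events signalled through the Event Change Register (ECR);
 * called from vmxnet3_interrupt_handler().
 */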
static void
vmxnet3_process_events(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t events = hw->shared->ecr;

	if (!events)
		return;

	/*
	 * ECR bits are cleared by writing 1 to them. Write the events back
	 * to ECR so that the bits which were set get cleared.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK) {
		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
		if (vmxnet3_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
	}

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "tq error 0x%x",
				    hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "rq error 0x%x",
				    hw->rqd_start->status.error);

		/* TODO: the device has to be reset here */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_DRV_LOG(DEBUG, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
}

static void
vmxnet3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	vmxnet3_process_events(dev);

	if (rte_intr_enable(&pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(vmxnet3_init_log)
{
	vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
	if (vmxnet3_logtype_init >= 0)
		rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
	vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
	if (vmxnet3_logtype_driver >= 0)
		rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
}
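/*
 * The log types registered above default to NOTICE level; they can be raised
 * at runtime through the EAL --log-level option, for example
 * "--log-level=pmd.net.vmxnet3.driver:debug".
 */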