1457967cdSJunfeng Guo /* SPDX-License-Identifier: BSD-3-Clause 263ef5456SJoshua Washington * Copyright(C) 2022-2023 Intel Corporation 363ef5456SJoshua Washington * Copyright(C) 2023 Google LLC 4457967cdSJunfeng Guo */ 5457967cdSJunfeng Guo 6457967cdSJunfeng Guo #include "gve_ethdev.h" 7457967cdSJunfeng Guo #include "base/gve_adminq.h" 8457967cdSJunfeng Guo #include "base/gve_register.h" 9748d0e7fSRushil Gupta #include "base/gve_osdep.h" 10748d0e7fSRushil Gupta #include "gve_version.h" 11030025b7SJoshua Washington #include "rte_ether.h" 1263ef5456SJoshua Washington #include "gve_rss.h" 13457967cdSJunfeng Guo 14457967cdSJunfeng Guo static void 15457967cdSJunfeng Guo gve_write_version(uint8_t *driver_version_register) 16457967cdSJunfeng Guo { 17748d0e7fSRushil Gupta const char *c = gve_version_string(); 18457967cdSJunfeng Guo while (*c) { 19457967cdSJunfeng Guo writeb(*c, driver_version_register); 20457967cdSJunfeng Guo c++; 21457967cdSJunfeng Guo } 22457967cdSJunfeng Guo writeb('\n', driver_version_register); 23457967cdSJunfeng Guo } 24457967cdSJunfeng Guo 257f369975SJoshua Washington static struct gve_queue_page_list * 267f369975SJoshua Washington gve_alloc_queue_page_list(const char *name, uint32_t num_pages) 274bec2d0bSJunfeng Guo { 284bec2d0bSJunfeng Guo struct gve_queue_page_list *qpl; 294bec2d0bSJunfeng Guo const struct rte_memzone *mz; 304bec2d0bSJunfeng Guo dma_addr_t page_bus; 314bec2d0bSJunfeng Guo uint32_t i; 324bec2d0bSJunfeng Guo 337f369975SJoshua Washington qpl = rte_zmalloc("qpl struct", sizeof(struct gve_queue_page_list), 0); 347f369975SJoshua Washington if (!qpl) 357f369975SJoshua Washington return NULL; 367f369975SJoshua Washington 377f369975SJoshua Washington mz = rte_memzone_reserve_aligned(name, num_pages * PAGE_SIZE, 384bec2d0bSJunfeng Guo rte_socket_id(), 394bec2d0bSJunfeng Guo RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE); 404bec2d0bSJunfeng Guo if (mz == NULL) { 417f369975SJoshua Washington PMD_DRV_LOG(ERR, "Failed to alloc %s.", name); 427f369975SJoshua Washington goto free_qpl_struct; 434bec2d0bSJunfeng Guo } 447f369975SJoshua Washington qpl->page_buses = rte_zmalloc("qpl page buses", 457f369975SJoshua Washington num_pages * sizeof(dma_addr_t), 0); 464bec2d0bSJunfeng Guo if (qpl->page_buses == NULL) { 477f369975SJoshua Washington PMD_DRV_LOG(ERR, "Failed to alloc qpl page buses"); 487f369975SJoshua Washington goto free_qpl_memzone; 494bec2d0bSJunfeng Guo } 504bec2d0bSJunfeng Guo page_bus = mz->iova; 517f369975SJoshua Washington for (i = 0; i < num_pages; i++) { 524bec2d0bSJunfeng Guo qpl->page_buses[i] = page_bus; 534bec2d0bSJunfeng Guo page_bus += PAGE_SIZE; 544bec2d0bSJunfeng Guo } 554bec2d0bSJunfeng Guo qpl->mz = mz; 567f369975SJoshua Washington qpl->num_entries = num_pages; 577f369975SJoshua Washington return qpl; 584bec2d0bSJunfeng Guo 597f369975SJoshua Washington free_qpl_memzone: 607f369975SJoshua Washington rte_memzone_free(qpl->mz); 617f369975SJoshua Washington free_qpl_struct: 627f369975SJoshua Washington rte_free(qpl); 637f369975SJoshua Washington return NULL; 644bec2d0bSJunfeng Guo } 654bec2d0bSJunfeng Guo 664bec2d0bSJunfeng Guo static void 677f369975SJoshua Washington gve_free_queue_page_list(struct gve_queue_page_list *qpl) 684bec2d0bSJunfeng Guo { 697f369975SJoshua Washington if (qpl->mz) { 707f369975SJoshua Washington rte_memzone_free(qpl->mz); 717f369975SJoshua Washington qpl->mz = NULL; 727f369975SJoshua Washington } 737f369975SJoshua Washington if (qpl->page_buses) { 747f369975SJoshua Washington rte_free(qpl->page_buses); 757f369975SJoshua Washington qpl->page_buses = NULL; 767f369975SJoshua Washington } 777f369975SJoshua Washington rte_free(qpl); 784bec2d0bSJunfeng Guo } 794bec2d0bSJunfeng Guo 807f369975SJoshua Washington struct gve_queue_page_list * 817f369975SJoshua Washington gve_setup_queue_page_list(struct gve_priv *priv, uint16_t queue_id, bool is_rx, 827f369975SJoshua Washington uint32_t num_pages) 837f369975SJoshua Washington { 847f369975SJoshua Washington const char *queue_type_string = is_rx ? "rx" : "tx"; 857f369975SJoshua Washington char qpl_name[RTE_MEMZONE_NAMESIZE]; 867f369975SJoshua Washington struct gve_queue_page_list *qpl; 877f369975SJoshua Washington int err; 887f369975SJoshua Washington 897f369975SJoshua Washington /* Allocate a new QPL. */ 907f369975SJoshua Washington snprintf(qpl_name, sizeof(qpl_name), "gve_%s_%s_qpl%d", 917f369975SJoshua Washington priv->pci_dev->device.name, queue_type_string, queue_id); 927f369975SJoshua Washington qpl = gve_alloc_queue_page_list(qpl_name, num_pages); 937f369975SJoshua Washington if (!qpl) { 947f369975SJoshua Washington PMD_DRV_LOG(ERR, 957f369975SJoshua Washington "Failed to alloc %s qpl for queue %hu.", 967f369975SJoshua Washington queue_type_string, queue_id); 977f369975SJoshua Washington return NULL; 987f369975SJoshua Washington } 997f369975SJoshua Washington 1007f369975SJoshua Washington /* Assign the QPL an ID. */ 1017f369975SJoshua Washington qpl->id = queue_id; 1027f369975SJoshua Washington if (is_rx) 1037f369975SJoshua Washington qpl->id += priv->max_nb_txq; 1047f369975SJoshua Washington 1057f369975SJoshua Washington /* Validate page registration limit and register QPLs. */ 1067f369975SJoshua Washington if (priv->num_registered_pages + qpl->num_entries > 1077f369975SJoshua Washington priv->max_registered_pages) { 1087f369975SJoshua Washington PMD_DRV_LOG(ERR, "Pages %" PRIu64 " > max registered pages %" PRIu64, 1097f369975SJoshua Washington priv->num_registered_pages + qpl->num_entries, 1107f369975SJoshua Washington priv->max_registered_pages); 1117f369975SJoshua Washington goto cleanup_qpl; 1127f369975SJoshua Washington } 1137f369975SJoshua Washington err = gve_adminq_register_page_list(priv, qpl); 1147f369975SJoshua Washington if (err) { 1157f369975SJoshua Washington PMD_DRV_LOG(ERR, 1167f369975SJoshua Washington "Failed to register %s qpl for queue %hu.", 1177f369975SJoshua Washington queue_type_string, queue_id); 1187f369975SJoshua Washington goto cleanup_qpl; 1197f369975SJoshua Washington } 1207f369975SJoshua Washington priv->num_registered_pages += qpl->num_entries; 1217f369975SJoshua Washington return qpl; 1227f369975SJoshua Washington 1237f369975SJoshua Washington cleanup_qpl: 1247f369975SJoshua Washington gve_free_queue_page_list(qpl); 1257f369975SJoshua Washington return NULL; 1267f369975SJoshua Washington } 1277f369975SJoshua Washington 1287f369975SJoshua Washington int 1297f369975SJoshua Washington gve_teardown_queue_page_list(struct gve_priv *priv, 1307f369975SJoshua Washington struct gve_queue_page_list *qpl) 1317f369975SJoshua Washington { 1327f369975SJoshua Washington int err = gve_adminq_unregister_page_list(priv, qpl->id); 1337f369975SJoshua Washington if (err) { 1347f369975SJoshua Washington PMD_DRV_LOG(CRIT, "Unable to unregister qpl %d!", qpl->id); 1357f369975SJoshua Washington return err; 1367f369975SJoshua Washington } 1377f369975SJoshua Washington priv->num_registered_pages -= qpl->num_entries; 1387f369975SJoshua Washington gve_free_queue_page_list(qpl); 1397f369975SJoshua Washington return 0; 1404bec2d0bSJunfeng Guo } 1414bec2d0bSJunfeng Guo 1424bec2d0bSJunfeng Guo static int 14371dea04cSJunfeng Guo gve_dev_configure(struct rte_eth_dev *dev) 144457967cdSJunfeng Guo { 14571dea04cSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 14671dea04cSJunfeng Guo 14763ef5456SJoshua Washington if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { 14871dea04cSJunfeng Guo dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH; 14963ef5456SJoshua Washington priv->rss_config.alg = GVE_RSS_HASH_TOEPLITZ; 15063ef5456SJoshua Washington } 15171dea04cSJunfeng Guo 15271dea04cSJunfeng Guo if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) 15371dea04cSJunfeng Guo priv->enable_rsc = 1; 15471dea04cSJunfeng Guo 15563ef5456SJoshua Washington /* Reset RSS RETA in case number of queues changed. */ 15663ef5456SJoshua Washington if (priv->rss_config.indir) { 15763ef5456SJoshua Washington struct gve_rss_config update_reta_config; 15863ef5456SJoshua Washington gve_init_rss_config_from_priv(priv, &update_reta_config); 15963ef5456SJoshua Washington gve_generate_rss_reta(dev, &update_reta_config); 16063ef5456SJoshua Washington 16163ef5456SJoshua Washington int err = gve_adminq_configure_rss(priv, &update_reta_config); 16263ef5456SJoshua Washington if (err) 16363ef5456SJoshua Washington PMD_DRV_LOG(ERR, 16463ef5456SJoshua Washington "Could not reconfigure RSS redirection table."); 16563ef5456SJoshua Washington else 16663ef5456SJoshua Washington gve_update_priv_rss_config(priv, &update_reta_config); 16763ef5456SJoshua Washington 16863ef5456SJoshua Washington gve_free_rss_config(&update_reta_config); 16963ef5456SJoshua Washington return err; 17063ef5456SJoshua Washington } 17163ef5456SJoshua Washington 172457967cdSJunfeng Guo return 0; 173457967cdSJunfeng Guo } 174457967cdSJunfeng Guo 175457967cdSJunfeng Guo static int 176440f551dSJunfeng Guo gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete) 177440f551dSJunfeng Guo { 178440f551dSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 179440f551dSJunfeng Guo struct rte_eth_link link; 180440f551dSJunfeng Guo int err; 181440f551dSJunfeng Guo 182440f551dSJunfeng Guo memset(&link, 0, sizeof(link)); 183440f551dSJunfeng Guo link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; 184440f551dSJunfeng Guo link.link_autoneg = RTE_ETH_LINK_AUTONEG; 185440f551dSJunfeng Guo 186440f551dSJunfeng Guo if (!dev->data->dev_started) { 187440f551dSJunfeng Guo link.link_status = RTE_ETH_LINK_DOWN; 188440f551dSJunfeng Guo link.link_speed = RTE_ETH_SPEED_NUM_NONE; 189440f551dSJunfeng Guo } else { 190440f551dSJunfeng Guo link.link_status = RTE_ETH_LINK_UP; 191440f551dSJunfeng Guo PMD_DRV_LOG(DEBUG, "Get link status from hw"); 192440f551dSJunfeng Guo err = gve_adminq_report_link_speed(priv); 193440f551dSJunfeng Guo if (err) { 194440f551dSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to get link speed."); 195440f551dSJunfeng Guo priv->link_speed = RTE_ETH_SPEED_NUM_UNKNOWN; 196440f551dSJunfeng Guo } 197440f551dSJunfeng Guo link.link_speed = priv->link_speed; 198440f551dSJunfeng Guo } 199440f551dSJunfeng Guo 200440f551dSJunfeng Guo return rte_eth_linkstatus_set(dev, &link); 201440f551dSJunfeng Guo } 202440f551dSJunfeng Guo 203440f551dSJunfeng Guo static int 204458b53deSRushil Gupta gve_alloc_stats_report(struct gve_priv *priv, 205458b53deSRushil Gupta uint16_t nb_tx_queues, uint16_t nb_rx_queues) 206458b53deSRushil Gupta { 207458b53deSRushil Gupta char z_name[RTE_MEMZONE_NAMESIZE]; 208458b53deSRushil Gupta int tx_stats_cnt; 209458b53deSRushil Gupta int rx_stats_cnt; 210458b53deSRushil Gupta 211458b53deSRushil Gupta tx_stats_cnt = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * 212458b53deSRushil Gupta nb_tx_queues; 213458b53deSRushil Gupta rx_stats_cnt = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * 214458b53deSRushil Gupta nb_rx_queues; 215458b53deSRushil Gupta priv->stats_report_len = sizeof(struct gve_stats_report) + 216458b53deSRushil Gupta sizeof(struct stats) * (tx_stats_cnt + rx_stats_cnt); 217458b53deSRushil Gupta 218458b53deSRushil Gupta snprintf(z_name, sizeof(z_name), "gve_stats_report_%s", 219458b53deSRushil Gupta priv->pci_dev->device.name); 220458b53deSRushil Gupta priv->stats_report_mem = rte_memzone_reserve_aligned(z_name, 221458b53deSRushil Gupta priv->stats_report_len, 222458b53deSRushil Gupta rte_socket_id(), 223458b53deSRushil Gupta RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE); 224458b53deSRushil Gupta 225458b53deSRushil Gupta if (!priv->stats_report_mem) 226458b53deSRushil Gupta return -ENOMEM; 227458b53deSRushil Gupta 228458b53deSRushil Gupta /* offset by skipping stats written by gve. */ 229458b53deSRushil Gupta priv->stats_start_idx = (GVE_TX_STATS_REPORT_NUM * nb_tx_queues) + 230458b53deSRushil Gupta (GVE_RX_STATS_REPORT_NUM * nb_rx_queues); 231458b53deSRushil Gupta priv->stats_end_idx = priv->stats_start_idx + 232458b53deSRushil Gupta (NIC_TX_STATS_REPORT_NUM * nb_tx_queues) + 233458b53deSRushil Gupta (NIC_RX_STATS_REPORT_NUM * nb_rx_queues) - 1; 234458b53deSRushil Gupta 235458b53deSRushil Gupta return 0; 236458b53deSRushil Gupta } 237458b53deSRushil Gupta 238458b53deSRushil Gupta static void 239458b53deSRushil Gupta gve_free_stats_report(struct rte_eth_dev *dev) 240458b53deSRushil Gupta { 241458b53deSRushil Gupta struct gve_priv *priv = dev->data->dev_private; 242458b53deSRushil Gupta rte_memzone_free(priv->stats_report_mem); 243458b53deSRushil Gupta priv->stats_report_mem = NULL; 244458b53deSRushil Gupta } 245458b53deSRushil Gupta 246458b53deSRushil Gupta /* Read Rx NIC stats from shared region */ 247458b53deSRushil Gupta static void 248458b53deSRushil Gupta gve_get_imissed_from_nic(struct rte_eth_dev *dev) 249458b53deSRushil Gupta { 250458b53deSRushil Gupta struct gve_stats_report *stats_report; 251458b53deSRushil Gupta struct gve_rx_queue *rxq; 252458b53deSRushil Gupta struct gve_priv *priv; 253458b53deSRushil Gupta struct stats stat; 254458b53deSRushil Gupta int queue_id; 255458b53deSRushil Gupta int stat_id; 256458b53deSRushil Gupta int i; 257458b53deSRushil Gupta 258458b53deSRushil Gupta priv = dev->data->dev_private; 259458b53deSRushil Gupta if (!priv->stats_report_mem) 260458b53deSRushil Gupta return; 261458b53deSRushil Gupta stats_report = (struct gve_stats_report *) 262458b53deSRushil Gupta priv->stats_report_mem->addr; 263458b53deSRushil Gupta for (i = priv->stats_start_idx; i <= priv->stats_end_idx; i++) { 264458b53deSRushil Gupta stat = stats_report->stats[i]; 265458b53deSRushil Gupta queue_id = cpu_to_be32(stat.queue_id); 266458b53deSRushil Gupta rxq = dev->data->rx_queues[queue_id]; 267458b53deSRushil Gupta if (rxq == NULL) 268458b53deSRushil Gupta continue; 269458b53deSRushil Gupta stat_id = cpu_to_be32(stat.stat_name); 270458b53deSRushil Gupta /* Update imissed. */ 271458b53deSRushil Gupta if (stat_id == RX_NO_BUFFERS_POSTED) 272458b53deSRushil Gupta rxq->stats.imissed = cpu_to_be64(stat.value); 273458b53deSRushil Gupta } 274458b53deSRushil Gupta } 275458b53deSRushil Gupta 276458b53deSRushil Gupta static int 277b044845bSJunfeng Guo gve_start_queues(struct rte_eth_dev *dev) 278457967cdSJunfeng Guo { 2794bec2d0bSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 280b044845bSJunfeng Guo uint16_t num_queues; 2814bec2d0bSJunfeng Guo uint16_t i; 282b044845bSJunfeng Guo int ret; 2834bec2d0bSJunfeng Guo 284b044845bSJunfeng Guo num_queues = dev->data->nb_tx_queues; 2854bec2d0bSJunfeng Guo priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues; 286b044845bSJunfeng Guo ret = gve_adminq_create_tx_queues(priv, num_queues); 287b044845bSJunfeng Guo if (ret != 0) { 288b044845bSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues); 289b044845bSJunfeng Guo return ret; 2904bec2d0bSJunfeng Guo } 291*7174c889STathagat Priyadarshi for (i = 0; i < num_queues; i++) { 292*7174c889STathagat Priyadarshi if (gve_is_gqi(priv)) 293*7174c889STathagat Priyadarshi ret = gve_tx_queue_start(dev, i); 294*7174c889STathagat Priyadarshi else 295*7174c889STathagat Priyadarshi ret = gve_tx_queue_start_dqo(dev, i); 296*7174c889STathagat Priyadarshi if (ret != 0) { 297b044845bSJunfeng Guo PMD_DRV_LOG(ERR, "Fail to start Tx queue %d", i); 298b044845bSJunfeng Guo goto err_tx; 2994bec2d0bSJunfeng Guo } 300*7174c889STathagat Priyadarshi } 3014bec2d0bSJunfeng Guo 3024bec2d0bSJunfeng Guo num_queues = dev->data->nb_rx_queues; 3034bec2d0bSJunfeng Guo priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues; 304b044845bSJunfeng Guo ret = gve_adminq_create_rx_queues(priv, num_queues); 305b044845bSJunfeng Guo if (ret != 0) { 306b044845bSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues); 3074bec2d0bSJunfeng Guo goto err_tx; 3084bec2d0bSJunfeng Guo } 3094bec2d0bSJunfeng Guo for (i = 0; i < num_queues; i++) { 31088cfffc6SJunfeng Guo if (gve_is_gqi(priv)) 311b044845bSJunfeng Guo ret = gve_rx_queue_start(dev, i); 31288cfffc6SJunfeng Guo else 313b044845bSJunfeng Guo ret = gve_rx_queue_start_dqo(dev, i); 314b044845bSJunfeng Guo if (ret != 0) { 315b044845bSJunfeng Guo PMD_DRV_LOG(ERR, "Fail to start Rx queue %d", i); 3164bec2d0bSJunfeng Guo goto err_rx; 3174bec2d0bSJunfeng Guo } 3184bec2d0bSJunfeng Guo } 3194bec2d0bSJunfeng Guo 320457967cdSJunfeng Guo return 0; 3214bec2d0bSJunfeng Guo 3224bec2d0bSJunfeng Guo err_rx: 323*7174c889STathagat Priyadarshi if (gve_is_gqi(priv)) 3244bec2d0bSJunfeng Guo gve_stop_rx_queues(dev); 325*7174c889STathagat Priyadarshi else 326*7174c889STathagat Priyadarshi gve_stop_rx_queues_dqo(dev); 3274bec2d0bSJunfeng Guo err_tx: 328*7174c889STathagat Priyadarshi if (gve_is_gqi(priv)) 3294bec2d0bSJunfeng Guo gve_stop_tx_queues(dev); 330*7174c889STathagat Priyadarshi else 331*7174c889STathagat Priyadarshi gve_stop_tx_queues_dqo(dev); 332b044845bSJunfeng Guo return ret; 333b044845bSJunfeng Guo } 334b044845bSJunfeng Guo 335b044845bSJunfeng Guo static int 336b044845bSJunfeng Guo gve_dev_start(struct rte_eth_dev *dev) 337b044845bSJunfeng Guo { 338458b53deSRushil Gupta struct gve_priv *priv; 339b044845bSJunfeng Guo int ret; 340b044845bSJunfeng Guo 341b044845bSJunfeng Guo ret = gve_start_queues(dev); 342b044845bSJunfeng Guo if (ret != 0) { 343b044845bSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to start queues"); 344b044845bSJunfeng Guo return ret; 345b044845bSJunfeng Guo } 346b044845bSJunfeng Guo 347b044845bSJunfeng Guo dev->data->dev_started = 1; 348b044845bSJunfeng Guo gve_link_update(dev, 0); 349b044845bSJunfeng Guo 350458b53deSRushil Gupta priv = dev->data->dev_private; 351458b53deSRushil Gupta /* No stats available yet for Dqo. */ 352458b53deSRushil Gupta if (gve_is_gqi(priv)) { 353458b53deSRushil Gupta ret = gve_alloc_stats_report(priv, 354458b53deSRushil Gupta dev->data->nb_tx_queues, 355458b53deSRushil Gupta dev->data->nb_rx_queues); 356458b53deSRushil Gupta if (ret != 0) { 357458b53deSRushil Gupta PMD_DRV_LOG(ERR, 358458b53deSRushil Gupta "Failed to allocate region for stats reporting."); 359458b53deSRushil Gupta return ret; 360458b53deSRushil Gupta } 361458b53deSRushil Gupta ret = gve_adminq_report_stats(priv, priv->stats_report_len, 362458b53deSRushil Gupta priv->stats_report_mem->iova, 363458b53deSRushil Gupta GVE_STATS_REPORT_TIMER_PERIOD); 364458b53deSRushil Gupta if (ret != 0) { 365458b53deSRushil Gupta PMD_DRV_LOG(ERR, "gve_adminq_report_stats command failed."); 366458b53deSRushil Gupta return ret; 367458b53deSRushil Gupta } 368458b53deSRushil Gupta } 369458b53deSRushil Gupta 370b044845bSJunfeng Guo return 0; 371457967cdSJunfeng Guo } 372457967cdSJunfeng Guo 373457967cdSJunfeng Guo static int 374457967cdSJunfeng Guo gve_dev_stop(struct rte_eth_dev *dev) 375457967cdSJunfeng Guo { 376*7174c889STathagat Priyadarshi struct gve_priv *priv = dev->data->dev_private; 377457967cdSJunfeng Guo dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN; 3784bec2d0bSJunfeng Guo 379*7174c889STathagat Priyadarshi if (gve_is_gqi(priv)) { 3804bec2d0bSJunfeng Guo gve_stop_tx_queues(dev); 3814bec2d0bSJunfeng Guo gve_stop_rx_queues(dev); 382*7174c889STathagat Priyadarshi } else { 383*7174c889STathagat Priyadarshi gve_stop_tx_queues_dqo(dev); 384*7174c889STathagat Priyadarshi gve_stop_rx_queues_dqo(dev); 385*7174c889STathagat Priyadarshi } 3864bec2d0bSJunfeng Guo 387457967cdSJunfeng Guo dev->data->dev_started = 0; 388457967cdSJunfeng Guo 389458b53deSRushil Gupta if (gve_is_gqi(dev->data->dev_private)) 390458b53deSRushil Gupta gve_free_stats_report(dev); 391458b53deSRushil Gupta 392457967cdSJunfeng Guo return 0; 393457967cdSJunfeng Guo } 394457967cdSJunfeng Guo 395457967cdSJunfeng Guo static int 396457967cdSJunfeng Guo gve_dev_close(struct rte_eth_dev *dev) 397457967cdSJunfeng Guo { 3984bec2d0bSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 399457967cdSJunfeng Guo int err = 0; 4004bec2d0bSJunfeng Guo uint16_t i; 401457967cdSJunfeng Guo 402457967cdSJunfeng Guo if (dev->data->dev_started) { 403457967cdSJunfeng Guo err = gve_dev_stop(dev); 404457967cdSJunfeng Guo if (err != 0) 405457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to stop dev."); 406457967cdSJunfeng Guo } 407457967cdSJunfeng Guo 4081e27182eSJunfeng Guo if (gve_is_gqi(priv)) { 40910d9e91aSJunfeng Guo for (i = 0; i < dev->data->nb_tx_queues; i++) 41010d9e91aSJunfeng Guo gve_tx_queue_release(dev, i); 4114bec2d0bSJunfeng Guo 41210d9e91aSJunfeng Guo for (i = 0; i < dev->data->nb_rx_queues; i++) 41310d9e91aSJunfeng Guo gve_rx_queue_release(dev, i); 4141e27182eSJunfeng Guo } else { 4151e27182eSJunfeng Guo for (i = 0; i < dev->data->nb_tx_queues; i++) 4161e27182eSJunfeng Guo gve_tx_queue_release_dqo(dev, i); 4171e27182eSJunfeng Guo 4181e27182eSJunfeng Guo for (i = 0; i < dev->data->nb_rx_queues; i++) 4191e27182eSJunfeng Guo gve_rx_queue_release_dqo(dev, i); 4201e27182eSJunfeng Guo } 4214bec2d0bSJunfeng Guo 4224bec2d0bSJunfeng Guo rte_free(priv->adminq); 4234bec2d0bSJunfeng Guo 424457967cdSJunfeng Guo dev->data->mac_addrs = NULL; 425457967cdSJunfeng Guo 426457967cdSJunfeng Guo return err; 427457967cdSJunfeng Guo } 428457967cdSJunfeng Guo 429f19c864eSJunfeng Guo static int 430748d0e7fSRushil Gupta gve_verify_driver_compatibility(struct gve_priv *priv) 431748d0e7fSRushil Gupta { 432748d0e7fSRushil Gupta const struct rte_memzone *driver_info_mem; 433748d0e7fSRushil Gupta struct gve_driver_info *driver_info; 434748d0e7fSRushil Gupta int err; 435748d0e7fSRushil Gupta 436748d0e7fSRushil Gupta driver_info_mem = rte_memzone_reserve_aligned("verify_driver_compatibility", 437748d0e7fSRushil Gupta sizeof(struct gve_driver_info), 438748d0e7fSRushil Gupta rte_socket_id(), 439748d0e7fSRushil Gupta RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE); 440748d0e7fSRushil Gupta 441748d0e7fSRushil Gupta if (driver_info_mem == NULL) { 442748d0e7fSRushil Gupta PMD_DRV_LOG(ERR, 443748d0e7fSRushil Gupta "Could not alloc memzone for driver compatibility"); 444748d0e7fSRushil Gupta return -ENOMEM; 445748d0e7fSRushil Gupta } 446748d0e7fSRushil Gupta driver_info = (struct gve_driver_info *)driver_info_mem->addr; 447748d0e7fSRushil Gupta 448748d0e7fSRushil Gupta *driver_info = (struct gve_driver_info) { 449748d0e7fSRushil Gupta .os_type = 5, /* DPDK */ 450748d0e7fSRushil Gupta .driver_major = GVE_VERSION_MAJOR, 451748d0e7fSRushil Gupta .driver_minor = GVE_VERSION_MINOR, 452748d0e7fSRushil Gupta .driver_sub = GVE_VERSION_SUB, 453748d0e7fSRushil Gupta .os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR), 454748d0e7fSRushil Gupta .os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR), 455748d0e7fSRushil Gupta .os_version_sub = cpu_to_be32(DPDK_VERSION_SUB), 456748d0e7fSRushil Gupta .driver_capability_flags = { 457748d0e7fSRushil Gupta cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1), 458748d0e7fSRushil Gupta cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2), 459748d0e7fSRushil Gupta cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3), 460748d0e7fSRushil Gupta cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4), 461748d0e7fSRushil Gupta }, 462748d0e7fSRushil Gupta }; 463748d0e7fSRushil Gupta 464748d0e7fSRushil Gupta populate_driver_version_strings((char *)driver_info->os_version_str1, 465748d0e7fSRushil Gupta (char *)driver_info->os_version_str2); 466748d0e7fSRushil Gupta 467748d0e7fSRushil Gupta err = gve_adminq_verify_driver_compatibility(priv, 468748d0e7fSRushil Gupta sizeof(struct gve_driver_info), 469748d0e7fSRushil Gupta (dma_addr_t)driver_info_mem->iova); 470748d0e7fSRushil Gupta /* It's ok if the device doesn't support this */ 471748d0e7fSRushil Gupta if (err == -EOPNOTSUPP) 472748d0e7fSRushil Gupta err = 0; 473748d0e7fSRushil Gupta 474748d0e7fSRushil Gupta rte_memzone_free(driver_info_mem); 475748d0e7fSRushil Gupta return err; 476748d0e7fSRushil Gupta } 477748d0e7fSRushil Gupta 478748d0e7fSRushil Gupta static int 47971dea04cSJunfeng Guo gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 48071dea04cSJunfeng Guo { 48171dea04cSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 48271dea04cSJunfeng Guo 48371dea04cSJunfeng Guo dev_info->device = dev->device; 48471dea04cSJunfeng Guo dev_info->max_mac_addrs = 1; 48571dea04cSJunfeng Guo dev_info->max_rx_queues = priv->max_nb_rxq; 48671dea04cSJunfeng Guo dev_info->max_tx_queues = priv->max_nb_txq; 487e422bb30SJoshua Washington if (gve_is_gqi(priv)) { 488835021a8SJoshua Washington dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI; 489e422bb30SJoshua Washington dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_GQI; 490e422bb30SJoshua Washington } else { 491835021a8SJoshua Washington dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO; 492e422bb30SJoshua Washington dev_info->max_rx_bufsize = GVE_RX_MAX_BUF_SIZE_DQO; 493e422bb30SJoshua Washington } 494e422bb30SJoshua Washington 4953b7896cdSJoshua Washington dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN; 496030025b7SJoshua Washington dev_info->max_mtu = priv->max_mtu; 497030025b7SJoshua Washington dev_info->min_mtu = RTE_ETHER_MIN_MTU; 49871dea04cSJunfeng Guo 49961f77e67SJoshua Washington dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_RSS_HASH; 500a46583cfSJunfeng Guo dev_info->tx_offload_capa = 501a46583cfSJunfeng Guo RTE_ETH_TX_OFFLOAD_MULTI_SEGS | 502a46583cfSJunfeng Guo RTE_ETH_TX_OFFLOAD_UDP_CKSUM | 503a46583cfSJunfeng Guo RTE_ETH_TX_OFFLOAD_TCP_CKSUM | 504a46583cfSJunfeng Guo RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | 505a46583cfSJunfeng Guo RTE_ETH_TX_OFFLOAD_TCP_TSO; 50671dea04cSJunfeng Guo 507f2a9e162SRushil Gupta if (!gve_is_gqi(priv)) { 508f2a9e162SRushil Gupta dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM; 509f2a9e162SRushil Gupta dev_info->rx_offload_capa |= 510f2a9e162SRushil Gupta RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | 511f2a9e162SRushil Gupta RTE_ETH_RX_OFFLOAD_UDP_CKSUM | 512f2a9e162SRushil Gupta RTE_ETH_RX_OFFLOAD_TCP_CKSUM | 513f2a9e162SRushil Gupta RTE_ETH_RX_OFFLOAD_TCP_LRO; 514f2a9e162SRushil Gupta } 51571dea04cSJunfeng Guo 51671dea04cSJunfeng Guo dev_info->default_rxconf = (struct rte_eth_rxconf) { 51771dea04cSJunfeng Guo .rx_free_thresh = GVE_DEFAULT_RX_FREE_THRESH, 51871dea04cSJunfeng Guo .rx_drop_en = 0, 51971dea04cSJunfeng Guo .offloads = 0, 52071dea04cSJunfeng Guo }; 52171dea04cSJunfeng Guo 52271dea04cSJunfeng Guo dev_info->default_txconf = (struct rte_eth_txconf) { 52371dea04cSJunfeng Guo .tx_free_thresh = GVE_DEFAULT_TX_FREE_THRESH, 524a14d391cSJunfeng Guo .tx_rs_thresh = GVE_DEFAULT_TX_RS_THRESH, 52571dea04cSJunfeng Guo .offloads = 0, 52671dea04cSJunfeng Guo }; 52771dea04cSJunfeng Guo 528eb8ec5c3SJoshua Washington dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt; 52971dea04cSJunfeng Guo dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { 530cde01d16SJoshua Washington .nb_max = priv->max_rx_desc_cnt, 531cde01d16SJoshua Washington .nb_min = priv->min_rx_desc_cnt, 53271dea04cSJunfeng Guo .nb_align = 1, 53371dea04cSJunfeng Guo }; 53471dea04cSJunfeng Guo 535eb8ec5c3SJoshua Washington dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt; 53671dea04cSJunfeng Guo dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { 537cde01d16SJoshua Washington .nb_max = priv->max_tx_desc_cnt, 538cde01d16SJoshua Washington .nb_min = priv->min_tx_desc_cnt, 53971dea04cSJunfeng Guo .nb_align = 1, 54071dea04cSJunfeng Guo }; 54171dea04cSJunfeng Guo 542d6ac6f45SJoshua Washington dev_info->flow_type_rss_offloads = GVE_RTE_RSS_OFFLOAD_ALL; 54363ef5456SJoshua Washington dev_info->hash_key_size = GVE_RSS_HASH_KEY_SIZE; 54463ef5456SJoshua Washington dev_info->reta_size = GVE_RSS_INDIR_SIZE; 54561f77e67SJoshua Washington 54671dea04cSJunfeng Guo return 0; 54771dea04cSJunfeng Guo } 54871dea04cSJunfeng Guo 54971dea04cSJunfeng Guo static int 5504f6b1dd8SJunfeng Guo gve_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) 5514f6b1dd8SJunfeng Guo { 5524f6b1dd8SJunfeng Guo uint16_t i; 553458b53deSRushil Gupta if (gve_is_gqi(dev->data->dev_private)) 554458b53deSRushil Gupta gve_get_imissed_from_nic(dev); 5554f6b1dd8SJunfeng Guo 5564f6b1dd8SJunfeng Guo for (i = 0; i < dev->data->nb_tx_queues; i++) { 5574f6b1dd8SJunfeng Guo struct gve_tx_queue *txq = dev->data->tx_queues[i]; 5584f6b1dd8SJunfeng Guo if (txq == NULL) 5594f6b1dd8SJunfeng Guo continue; 5604f6b1dd8SJunfeng Guo 561c222ea9cSLevend Sayar stats->opackets += txq->stats.packets; 562c222ea9cSLevend Sayar stats->obytes += txq->stats.bytes; 563c222ea9cSLevend Sayar stats->oerrors += txq->stats.errors; 5644f6b1dd8SJunfeng Guo } 5654f6b1dd8SJunfeng Guo 5664f6b1dd8SJunfeng Guo for (i = 0; i < dev->data->nb_rx_queues; i++) { 5674f6b1dd8SJunfeng Guo struct gve_rx_queue *rxq = dev->data->rx_queues[i]; 5684f6b1dd8SJunfeng Guo if (rxq == NULL) 5694f6b1dd8SJunfeng Guo continue; 5704f6b1dd8SJunfeng Guo 571c222ea9cSLevend Sayar stats->ipackets += rxq->stats.packets; 572c222ea9cSLevend Sayar stats->ibytes += rxq->stats.bytes; 573c222ea9cSLevend Sayar stats->ierrors += rxq->stats.errors; 574c222ea9cSLevend Sayar stats->rx_nombuf += rxq->stats.no_mbufs; 575458b53deSRushil Gupta stats->imissed += rxq->stats.imissed; 5764f6b1dd8SJunfeng Guo } 5774f6b1dd8SJunfeng Guo 5784f6b1dd8SJunfeng Guo return 0; 5794f6b1dd8SJunfeng Guo } 5804f6b1dd8SJunfeng Guo 5814f6b1dd8SJunfeng Guo static int 5824f6b1dd8SJunfeng Guo gve_dev_stats_reset(struct rte_eth_dev *dev) 5834f6b1dd8SJunfeng Guo { 5844f6b1dd8SJunfeng Guo uint16_t i; 5854f6b1dd8SJunfeng Guo 5864f6b1dd8SJunfeng Guo for (i = 0; i < dev->data->nb_tx_queues; i++) { 5874f6b1dd8SJunfeng Guo struct gve_tx_queue *txq = dev->data->tx_queues[i]; 5884f6b1dd8SJunfeng Guo if (txq == NULL) 5894f6b1dd8SJunfeng Guo continue; 5904f6b1dd8SJunfeng Guo 591c222ea9cSLevend Sayar memset(&txq->stats, 0, sizeof(txq->stats)); 5924f6b1dd8SJunfeng Guo } 5934f6b1dd8SJunfeng Guo 5944f6b1dd8SJunfeng Guo for (i = 0; i < dev->data->nb_rx_queues; i++) { 5954f6b1dd8SJunfeng Guo struct gve_rx_queue *rxq = dev->data->rx_queues[i]; 5964f6b1dd8SJunfeng Guo if (rxq == NULL) 5974f6b1dd8SJunfeng Guo continue; 5984f6b1dd8SJunfeng Guo 599c222ea9cSLevend Sayar memset(&rxq->stats, 0, sizeof(rxq->stats)); 6004f6b1dd8SJunfeng Guo } 6014f6b1dd8SJunfeng Guo 6024f6b1dd8SJunfeng Guo return 0; 6034f6b1dd8SJunfeng Guo } 6044f6b1dd8SJunfeng Guo 6054f6b1dd8SJunfeng Guo static int 606f19c864eSJunfeng Guo gve_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) 607f19c864eSJunfeng Guo { 608f19c864eSJunfeng Guo struct gve_priv *priv = dev->data->dev_private; 609f19c864eSJunfeng Guo int err; 610f19c864eSJunfeng Guo 611f19c864eSJunfeng Guo if (mtu < RTE_ETHER_MIN_MTU || mtu > priv->max_mtu) { 612f19c864eSJunfeng Guo PMD_DRV_LOG(ERR, "MIN MTU is %u, MAX MTU is %u", 613f19c864eSJunfeng Guo RTE_ETHER_MIN_MTU, priv->max_mtu); 614f19c864eSJunfeng Guo return -EINVAL; 615f19c864eSJunfeng Guo } 616f19c864eSJunfeng Guo 617f19c864eSJunfeng Guo /* mtu setting is forbidden if port is start */ 618f19c864eSJunfeng Guo if (dev->data->dev_started) { 619f19c864eSJunfeng Guo PMD_DRV_LOG(ERR, "Port must be stopped before configuration"); 620f19c864eSJunfeng Guo return -EBUSY; 621f19c864eSJunfeng Guo } 622f19c864eSJunfeng Guo 623f19c864eSJunfeng Guo err = gve_adminq_set_mtu(priv, mtu); 624f19c864eSJunfeng Guo if (err) { 625f19c864eSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to set mtu as %u err = %d", mtu, err); 626f19c864eSJunfeng Guo return err; 627f19c864eSJunfeng Guo } 628f19c864eSJunfeng Guo 629f19c864eSJunfeng Guo return 0; 630f19c864eSJunfeng Guo } 631f19c864eSJunfeng Guo 632c222ea9cSLevend Sayar #define TX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_tx_stats, x) 633c222ea9cSLevend Sayar #define RX_QUEUE_STATS_OFFSET(x) offsetof(struct gve_rx_stats, x) 634c222ea9cSLevend Sayar 635c222ea9cSLevend Sayar static const struct gve_xstats_name_offset tx_xstats_name_offset[] = { 636c222ea9cSLevend Sayar { "packets", TX_QUEUE_STATS_OFFSET(packets) }, 637c222ea9cSLevend Sayar { "bytes", TX_QUEUE_STATS_OFFSET(bytes) }, 638c222ea9cSLevend Sayar { "errors", TX_QUEUE_STATS_OFFSET(errors) }, 639c222ea9cSLevend Sayar }; 640c222ea9cSLevend Sayar 641c222ea9cSLevend Sayar static const struct gve_xstats_name_offset rx_xstats_name_offset[] = { 642c222ea9cSLevend Sayar { "packets", RX_QUEUE_STATS_OFFSET(packets) }, 643c222ea9cSLevend Sayar { "bytes", RX_QUEUE_STATS_OFFSET(bytes) }, 644c222ea9cSLevend Sayar { "errors", RX_QUEUE_STATS_OFFSET(errors) }, 645c222ea9cSLevend Sayar { "mbuf_alloc_errors", RX_QUEUE_STATS_OFFSET(no_mbufs) }, 646c222ea9cSLevend Sayar { "mbuf_alloc_errors_bulk", RX_QUEUE_STATS_OFFSET(no_mbufs_bulk) }, 647458b53deSRushil Gupta { "imissed", RX_QUEUE_STATS_OFFSET(imissed) }, 648c222ea9cSLevend Sayar }; 649c222ea9cSLevend Sayar 650c222ea9cSLevend Sayar static int 651c222ea9cSLevend Sayar gve_xstats_count(struct rte_eth_dev *dev) 652c222ea9cSLevend Sayar { 653c222ea9cSLevend Sayar uint16_t i, count = 0; 654c222ea9cSLevend Sayar 655c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_tx_queues; i++) { 656c222ea9cSLevend Sayar if (dev->data->tx_queues[i]) 657c222ea9cSLevend Sayar count += RTE_DIM(tx_xstats_name_offset); 658c222ea9cSLevend Sayar } 659c222ea9cSLevend Sayar 660c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_rx_queues; i++) { 661c222ea9cSLevend Sayar if (dev->data->rx_queues[i]) 662c222ea9cSLevend Sayar count += RTE_DIM(rx_xstats_name_offset); 663c222ea9cSLevend Sayar } 664c222ea9cSLevend Sayar 665c222ea9cSLevend Sayar return count; 666c222ea9cSLevend Sayar } 667c222ea9cSLevend Sayar 668c222ea9cSLevend Sayar static int 669c222ea9cSLevend Sayar gve_xstats_get(struct rte_eth_dev *dev, 670c222ea9cSLevend Sayar struct rte_eth_xstat *xstats, 671c222ea9cSLevend Sayar unsigned int size) 672c222ea9cSLevend Sayar { 673c222ea9cSLevend Sayar uint16_t i, j, count = gve_xstats_count(dev); 674c222ea9cSLevend Sayar const char *stats; 675c222ea9cSLevend Sayar 676458b53deSRushil Gupta if (gve_is_gqi(dev->data->dev_private)) 677458b53deSRushil Gupta gve_get_imissed_from_nic(dev); 678458b53deSRushil Gupta 679c222ea9cSLevend Sayar if (xstats == NULL || size < count) 680c222ea9cSLevend Sayar return count; 681c222ea9cSLevend Sayar 682c222ea9cSLevend Sayar count = 0; 683c222ea9cSLevend Sayar 684c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_tx_queues; i++) { 685c222ea9cSLevend Sayar const struct gve_tx_queue *txq = dev->data->tx_queues[i]; 686c222ea9cSLevend Sayar if (txq == NULL) 687c222ea9cSLevend Sayar continue; 688c222ea9cSLevend Sayar 689c222ea9cSLevend Sayar stats = (const char *)&txq->stats; 690c222ea9cSLevend Sayar for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++, count++) { 691c222ea9cSLevend Sayar xstats[count].id = count; 692c222ea9cSLevend Sayar xstats[count].value = *(const uint64_t *) 693c222ea9cSLevend Sayar (stats + tx_xstats_name_offset[j].offset); 694c222ea9cSLevend Sayar } 695c222ea9cSLevend Sayar } 696c222ea9cSLevend Sayar 697c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_rx_queues; i++) { 698c222ea9cSLevend Sayar const struct gve_rx_queue *rxq = dev->data->rx_queues[i]; 699c222ea9cSLevend Sayar if (rxq == NULL) 700c222ea9cSLevend Sayar continue; 701c222ea9cSLevend Sayar 702c222ea9cSLevend Sayar stats = (const char *)&rxq->stats; 703c222ea9cSLevend Sayar for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++, count++) { 704c222ea9cSLevend Sayar xstats[count].id = count; 705c222ea9cSLevend Sayar xstats[count].value = *(const uint64_t *) 706c222ea9cSLevend Sayar (stats + rx_xstats_name_offset[j].offset); 707c222ea9cSLevend Sayar } 708c222ea9cSLevend Sayar } 709c222ea9cSLevend Sayar 710c222ea9cSLevend Sayar return count; 711c222ea9cSLevend Sayar } 712c222ea9cSLevend Sayar 713c222ea9cSLevend Sayar static int 714c222ea9cSLevend Sayar gve_xstats_get_names(struct rte_eth_dev *dev, 715c222ea9cSLevend Sayar struct rte_eth_xstat_name *xstats_names, 716c222ea9cSLevend Sayar unsigned int size) 717c222ea9cSLevend Sayar { 718c222ea9cSLevend Sayar uint16_t i, j, count = gve_xstats_count(dev); 719c222ea9cSLevend Sayar 720c222ea9cSLevend Sayar if (xstats_names == NULL || size < count) 721c222ea9cSLevend Sayar return count; 722c222ea9cSLevend Sayar 723c222ea9cSLevend Sayar count = 0; 724c222ea9cSLevend Sayar 725c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_tx_queues; i++) { 726c222ea9cSLevend Sayar if (dev->data->tx_queues[i] == NULL) 727c222ea9cSLevend Sayar continue; 728c222ea9cSLevend Sayar 729c222ea9cSLevend Sayar for (j = 0; j < RTE_DIM(tx_xstats_name_offset); j++) 730c222ea9cSLevend Sayar snprintf(xstats_names[count++].name, 731c222ea9cSLevend Sayar RTE_ETH_XSTATS_NAME_SIZE, 732c222ea9cSLevend Sayar "tx_q%u_%s", i, tx_xstats_name_offset[j].name); 733c222ea9cSLevend Sayar } 734c222ea9cSLevend Sayar 735c222ea9cSLevend Sayar for (i = 0; i < dev->data->nb_rx_queues; i++) { 736c222ea9cSLevend Sayar if (dev->data->rx_queues[i] == NULL) 737c222ea9cSLevend Sayar continue; 738c222ea9cSLevend Sayar 739c222ea9cSLevend Sayar for (j = 0; j < RTE_DIM(rx_xstats_name_offset); j++) 740c222ea9cSLevend Sayar snprintf(xstats_names[count++].name, 741c222ea9cSLevend Sayar RTE_ETH_XSTATS_NAME_SIZE, 742c222ea9cSLevend Sayar "rx_q%u_%s", i, rx_xstats_name_offset[j].name); 743c222ea9cSLevend Sayar } 744c222ea9cSLevend Sayar 745c222ea9cSLevend Sayar return count; 746c222ea9cSLevend Sayar } 747c222ea9cSLevend Sayar 74863ef5456SJoshua Washington 74963ef5456SJoshua Washington static int 75063ef5456SJoshua Washington gve_rss_hash_update(struct rte_eth_dev *dev, 75163ef5456SJoshua Washington struct rte_eth_rss_conf *rss_conf) 75263ef5456SJoshua Washington { 75363ef5456SJoshua Washington struct gve_priv *priv = dev->data->dev_private; 75463ef5456SJoshua Washington struct gve_rss_config gve_rss_conf; 75563ef5456SJoshua Washington int rss_reta_size; 75663ef5456SJoshua Washington int err; 75763ef5456SJoshua Washington 75863ef5456SJoshua Washington if (gve_validate_rss_hf(rss_conf->rss_hf)) { 75963ef5456SJoshua Washington PMD_DRV_LOG(ERR, "Unsupported hash function."); 76063ef5456SJoshua Washington return -EINVAL; 76163ef5456SJoshua Washington } 76263ef5456SJoshua Washington 76363ef5456SJoshua Washington if (rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_TOEPLITZ && 76463ef5456SJoshua Washington rss_conf->algorithm != RTE_ETH_HASH_FUNCTION_DEFAULT) { 76563ef5456SJoshua Washington PMD_DRV_LOG(ERR, "Device only supports Toeplitz algorithm."); 76663ef5456SJoshua Washington return -EINVAL; 76763ef5456SJoshua Washington } 76863ef5456SJoshua Washington 76963ef5456SJoshua Washington if (rss_conf->rss_key_len) { 77063ef5456SJoshua Washington if (rss_conf->rss_key_len != GVE_RSS_HASH_KEY_SIZE) { 77163ef5456SJoshua Washington PMD_DRV_LOG(ERR, 77263ef5456SJoshua Washington "Invalid hash key size. Only RSS hash key size " 77363ef5456SJoshua Washington "of %u supported", GVE_RSS_HASH_KEY_SIZE); 77463ef5456SJoshua Washington return -EINVAL; 77563ef5456SJoshua Washington } 77663ef5456SJoshua Washington 77763ef5456SJoshua Washington if (!rss_conf->rss_key) { 77863ef5456SJoshua Washington PMD_DRV_LOG(ERR, "RSS key must be non-null."); 77963ef5456SJoshua Washington return -EINVAL; 78063ef5456SJoshua Washington } 78163ef5456SJoshua Washington } else { 78263ef5456SJoshua Washington if (!priv->rss_config.key_size) { 78363ef5456SJoshua Washington PMD_DRV_LOG(ERR, "RSS key must be initialized before " 78463ef5456SJoshua Washington "any other configuration."); 78563ef5456SJoshua Washington return -EINVAL; 78663ef5456SJoshua Washington } 78763ef5456SJoshua Washington rss_conf->rss_key_len = priv->rss_config.key_size; 78863ef5456SJoshua Washington } 78963ef5456SJoshua Washington 79063ef5456SJoshua Washington rss_reta_size = priv->rss_config.indir ? 79163ef5456SJoshua Washington priv->rss_config.indir_size : 79263ef5456SJoshua Washington GVE_RSS_INDIR_SIZE; 79363ef5456SJoshua Washington err = gve_init_rss_config(&gve_rss_conf, rss_conf->rss_key_len, 79463ef5456SJoshua Washington rss_reta_size); 79563ef5456SJoshua Washington if (err) 79663ef5456SJoshua Washington return err; 79763ef5456SJoshua Washington 79863ef5456SJoshua Washington gve_rss_conf.alg = GVE_RSS_HASH_TOEPLITZ; 79963ef5456SJoshua Washington err = gve_update_rss_hash_types(priv, &gve_rss_conf, rss_conf); 80063ef5456SJoshua Washington if (err) 80163ef5456SJoshua Washington goto err; 80263ef5456SJoshua Washington err = gve_update_rss_key(priv, &gve_rss_conf, rss_conf); 80363ef5456SJoshua Washington if (err) 80463ef5456SJoshua Washington goto err; 80563ef5456SJoshua Washington 80663ef5456SJoshua Washington /* Set redirection table to default or preexisting. */ 80763ef5456SJoshua Washington if (!priv->rss_config.indir) 80863ef5456SJoshua Washington gve_generate_rss_reta(dev, &gve_rss_conf); 80963ef5456SJoshua Washington else 81063ef5456SJoshua Washington memcpy(gve_rss_conf.indir, priv->rss_config.indir, 81163ef5456SJoshua Washington gve_rss_conf.indir_size * sizeof(*priv->rss_config.indir)); 81263ef5456SJoshua Washington 81363ef5456SJoshua Washington err = gve_adminq_configure_rss(priv, &gve_rss_conf); 81463ef5456SJoshua Washington if (!err) 81563ef5456SJoshua Washington gve_update_priv_rss_config(priv, &gve_rss_conf); 81663ef5456SJoshua Washington 81763ef5456SJoshua Washington err: 81863ef5456SJoshua Washington gve_free_rss_config(&gve_rss_conf); 81963ef5456SJoshua Washington return err; 82063ef5456SJoshua Washington } 82163ef5456SJoshua Washington 82263ef5456SJoshua Washington static int 82363ef5456SJoshua Washington gve_rss_hash_conf_get(struct rte_eth_dev *dev, 82463ef5456SJoshua Washington struct rte_eth_rss_conf *rss_conf) 82563ef5456SJoshua Washington { 82663ef5456SJoshua Washington struct gve_priv *priv = dev->data->dev_private; 82763ef5456SJoshua Washington 82863ef5456SJoshua Washington if (!(dev->data->dev_conf.rxmode.offloads & 82963ef5456SJoshua Washington RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 83063ef5456SJoshua Washington PMD_DRV_LOG(ERR, "RSS not configured."); 83163ef5456SJoshua Washington return -ENOTSUP; 83263ef5456SJoshua Washington } 83363ef5456SJoshua Washington 83463ef5456SJoshua Washington 83563ef5456SJoshua Washington gve_to_rte_rss_hf(priv->rss_config.hash_types, rss_conf); 83663ef5456SJoshua Washington rss_conf->rss_key_len = priv->rss_config.key_size; 83763ef5456SJoshua Washington if (rss_conf->rss_key) { 83863ef5456SJoshua Washington if (!priv->rss_config.key) { 83963ef5456SJoshua Washington PMD_DRV_LOG(ERR, "Unable to retrieve default RSS hash key."); 84063ef5456SJoshua Washington return -ENOTSUP; 84163ef5456SJoshua Washington } 84263ef5456SJoshua Washington memcpy(rss_conf->rss_key, priv->rss_config.key, 84363ef5456SJoshua Washington rss_conf->rss_key_len * sizeof(*rss_conf->rss_key)); 84463ef5456SJoshua Washington } 84563ef5456SJoshua Washington 84663ef5456SJoshua Washington return 0; 84763ef5456SJoshua Washington } 84863ef5456SJoshua Washington 8490d4ea0b3SJoshua Washington static int 8500d4ea0b3SJoshua Washington gve_rss_reta_update(struct rte_eth_dev *dev, 8510d4ea0b3SJoshua Washington struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) 8520d4ea0b3SJoshua Washington { 8530d4ea0b3SJoshua Washington struct gve_priv *priv = dev->data->dev_private; 8540d4ea0b3SJoshua Washington struct gve_rss_config gve_rss_conf; 8550d4ea0b3SJoshua Washington int table_id; 8560d4ea0b3SJoshua Washington int err; 8570d4ea0b3SJoshua Washington int i; 8580d4ea0b3SJoshua Washington 8590d4ea0b3SJoshua Washington /* RSS key must be set before the redirection table can be set. */ 8600d4ea0b3SJoshua Washington if (!priv->rss_config.key || priv->rss_config.key_size == 0) { 8610d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "RSS hash key msut be set before the " 8620d4ea0b3SJoshua Washington "redirection table can be updated."); 8630d4ea0b3SJoshua Washington return -ENOTSUP; 8640d4ea0b3SJoshua Washington } 8650d4ea0b3SJoshua Washington 8660d4ea0b3SJoshua Washington if (reta_size != GVE_RSS_INDIR_SIZE) { 8670d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "Redirection table must have %hu elements", 8680d4ea0b3SJoshua Washington (uint16_t)GVE_RSS_INDIR_SIZE); 8690d4ea0b3SJoshua Washington return -EINVAL; 8700d4ea0b3SJoshua Washington } 8710d4ea0b3SJoshua Washington 8720d4ea0b3SJoshua Washington err = gve_init_rss_config_from_priv(priv, &gve_rss_conf); 8730d4ea0b3SJoshua Washington if (err) { 8740d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "Error allocating new RSS config."); 8750d4ea0b3SJoshua Washington return err; 8760d4ea0b3SJoshua Washington } 8770d4ea0b3SJoshua Washington 8780d4ea0b3SJoshua Washington table_id = 0; 8790d4ea0b3SJoshua Washington for (i = 0; i < priv->rss_config.indir_size; i++) { 8800d4ea0b3SJoshua Washington int table_entry = i % RTE_ETH_RETA_GROUP_SIZE; 8810d4ea0b3SJoshua Washington if (reta_conf[table_id].mask & (1ULL << table_entry)) 8820d4ea0b3SJoshua Washington gve_rss_conf.indir[i] = 8830d4ea0b3SJoshua Washington reta_conf[table_id].reta[table_entry]; 8840d4ea0b3SJoshua Washington 8850d4ea0b3SJoshua Washington if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1) 8860d4ea0b3SJoshua Washington table_id++; 8870d4ea0b3SJoshua Washington } 8880d4ea0b3SJoshua Washington 8890d4ea0b3SJoshua Washington err = gve_adminq_configure_rss(priv, &gve_rss_conf); 8900d4ea0b3SJoshua Washington if (err) 8910d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "Problem configuring RSS with device."); 8920d4ea0b3SJoshua Washington else 8930d4ea0b3SJoshua Washington gve_update_priv_rss_config(priv, &gve_rss_conf); 8940d4ea0b3SJoshua Washington 8950d4ea0b3SJoshua Washington gve_free_rss_config(&gve_rss_conf); 8960d4ea0b3SJoshua Washington return err; 8970d4ea0b3SJoshua Washington } 8980d4ea0b3SJoshua Washington 8990d4ea0b3SJoshua Washington static int 9000d4ea0b3SJoshua Washington gve_rss_reta_query(struct rte_eth_dev *dev, 9010d4ea0b3SJoshua Washington struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size) 9020d4ea0b3SJoshua Washington { 9030d4ea0b3SJoshua Washington struct gve_priv *priv = dev->data->dev_private; 9040d4ea0b3SJoshua Washington int table_id; 9050d4ea0b3SJoshua Washington int i; 9060d4ea0b3SJoshua Washington 9070d4ea0b3SJoshua Washington if (!(dev->data->dev_conf.rxmode.offloads & 9080d4ea0b3SJoshua Washington RTE_ETH_RX_OFFLOAD_RSS_HASH)) { 9090d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "RSS not configured."); 9100d4ea0b3SJoshua Washington return -ENOTSUP; 9110d4ea0b3SJoshua Washington } 9120d4ea0b3SJoshua Washington 9130d4ea0b3SJoshua Washington /* RSS key must be set before the redirection table can be queried. */ 9140d4ea0b3SJoshua Washington if (!priv->rss_config.key) { 9150d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "RSS hash key must be set before the " 9160d4ea0b3SJoshua Washington "redirection table can be initialized."); 9170d4ea0b3SJoshua Washington return -ENOTSUP; 9180d4ea0b3SJoshua Washington } 9190d4ea0b3SJoshua Washington 9200d4ea0b3SJoshua Washington if (reta_size != priv->rss_config.indir_size) { 9210d4ea0b3SJoshua Washington PMD_DRV_LOG(ERR, "RSS redirection table must have %d entries.", 9220d4ea0b3SJoshua Washington priv->rss_config.indir_size); 9230d4ea0b3SJoshua Washington return -EINVAL; 9240d4ea0b3SJoshua Washington } 9250d4ea0b3SJoshua Washington 9260d4ea0b3SJoshua Washington table_id = 0; 9270d4ea0b3SJoshua Washington for (i = 0; i < priv->rss_config.indir_size; i++) { 9280d4ea0b3SJoshua Washington int table_entry = i % RTE_ETH_RETA_GROUP_SIZE; 9290d4ea0b3SJoshua Washington if (reta_conf[table_id].mask & (1ULL << table_entry)) 9300d4ea0b3SJoshua Washington reta_conf[table_id].reta[table_entry] = 9310d4ea0b3SJoshua Washington priv->rss_config.indir[i]; 9320d4ea0b3SJoshua Washington 9330d4ea0b3SJoshua Washington if (table_entry == RTE_ETH_RETA_GROUP_SIZE - 1) 9340d4ea0b3SJoshua Washington table_id++; 9350d4ea0b3SJoshua Washington } 9360d4ea0b3SJoshua Washington 9370d4ea0b3SJoshua Washington return 0; 9380d4ea0b3SJoshua Washington } 9390d4ea0b3SJoshua Washington 940457967cdSJunfeng Guo static const struct eth_dev_ops gve_eth_dev_ops = { 941457967cdSJunfeng Guo .dev_configure = gve_dev_configure, 942457967cdSJunfeng Guo .dev_start = gve_dev_start, 943457967cdSJunfeng Guo .dev_stop = gve_dev_stop, 944457967cdSJunfeng Guo .dev_close = gve_dev_close, 94571dea04cSJunfeng Guo .dev_infos_get = gve_dev_info_get, 9464bec2d0bSJunfeng Guo .rx_queue_setup = gve_rx_queue_setup, 9474bec2d0bSJunfeng Guo .tx_queue_setup = gve_tx_queue_setup, 94810d9e91aSJunfeng Guo .rx_queue_release = gve_rx_queue_release, 94910d9e91aSJunfeng Guo .tx_queue_release = gve_tx_queue_release, 950b044845bSJunfeng Guo .rx_queue_start = gve_rx_queue_start, 951b044845bSJunfeng Guo .tx_queue_start = gve_tx_queue_start, 952b044845bSJunfeng Guo .rx_queue_stop = gve_rx_queue_stop, 953b044845bSJunfeng Guo .tx_queue_stop = gve_tx_queue_stop, 954440f551dSJunfeng Guo .link_update = gve_link_update, 9554f6b1dd8SJunfeng Guo .stats_get = gve_dev_stats_get, 9564f6b1dd8SJunfeng Guo .stats_reset = gve_dev_stats_reset, 957f19c864eSJunfeng Guo .mtu_set = gve_dev_mtu_set, 958c222ea9cSLevend Sayar .xstats_get = gve_xstats_get, 959c222ea9cSLevend Sayar .xstats_get_names = gve_xstats_get_names, 96063ef5456SJoshua Washington .rss_hash_update = gve_rss_hash_update, 96163ef5456SJoshua Washington .rss_hash_conf_get = gve_rss_hash_conf_get, 9620d4ea0b3SJoshua Washington .reta_update = gve_rss_reta_update, 9630d4ea0b3SJoshua Washington .reta_query = gve_rss_reta_query, 964457967cdSJunfeng Guo }; 965457967cdSJunfeng Guo 966a14d391cSJunfeng Guo static const struct eth_dev_ops gve_eth_dev_ops_dqo = { 967a14d391cSJunfeng Guo .dev_configure = gve_dev_configure, 968a14d391cSJunfeng Guo .dev_start = gve_dev_start, 969a14d391cSJunfeng Guo .dev_stop = gve_dev_stop, 970a14d391cSJunfeng Guo .dev_close = gve_dev_close, 971a14d391cSJunfeng Guo .dev_infos_get = gve_dev_info_get, 9721dc00f4fSJunfeng Guo .rx_queue_setup = gve_rx_queue_setup_dqo, 973a14d391cSJunfeng Guo .tx_queue_setup = gve_tx_queue_setup_dqo, 9741e27182eSJunfeng Guo .rx_queue_release = gve_rx_queue_release_dqo, 9751e27182eSJunfeng Guo .tx_queue_release = gve_tx_queue_release_dqo, 976b044845bSJunfeng Guo .rx_queue_start = gve_rx_queue_start_dqo, 977b044845bSJunfeng Guo .tx_queue_start = gve_tx_queue_start_dqo, 978b044845bSJunfeng Guo .rx_queue_stop = gve_rx_queue_stop_dqo, 979b044845bSJunfeng Guo .tx_queue_stop = gve_tx_queue_stop_dqo, 980a14d391cSJunfeng Guo .link_update = gve_link_update, 981a14d391cSJunfeng Guo .stats_get = gve_dev_stats_get, 982a14d391cSJunfeng Guo .stats_reset = gve_dev_stats_reset, 983a14d391cSJunfeng Guo .mtu_set = gve_dev_mtu_set, 984a14d391cSJunfeng Guo .xstats_get = gve_xstats_get, 985a14d391cSJunfeng Guo .xstats_get_names = gve_xstats_get_names, 98663ef5456SJoshua Washington .rss_hash_update = gve_rss_hash_update, 98763ef5456SJoshua Washington .rss_hash_conf_get = gve_rss_hash_conf_get, 9880d4ea0b3SJoshua Washington .reta_update = gve_rss_reta_update, 9890d4ea0b3SJoshua Washington .reta_query = gve_rss_reta_query, 990a14d391cSJunfeng Guo }; 991a14d391cSJunfeng Guo 992457967cdSJunfeng Guo static void 993457967cdSJunfeng Guo gve_free_counter_array(struct gve_priv *priv) 994457967cdSJunfeng Guo { 995457967cdSJunfeng Guo rte_memzone_free(priv->cnt_array_mz); 996457967cdSJunfeng Guo priv->cnt_array = NULL; 997457967cdSJunfeng Guo } 998457967cdSJunfeng Guo 999457967cdSJunfeng Guo static void 1000457967cdSJunfeng Guo gve_free_irq_db(struct gve_priv *priv) 1001457967cdSJunfeng Guo { 1002457967cdSJunfeng Guo rte_memzone_free(priv->irq_dbs_mz); 1003457967cdSJunfeng Guo priv->irq_dbs = NULL; 1004457967cdSJunfeng Guo } 1005457967cdSJunfeng Guo 1006457967cdSJunfeng Guo static void 1007457967cdSJunfeng Guo gve_teardown_device_resources(struct gve_priv *priv) 1008457967cdSJunfeng Guo { 1009457967cdSJunfeng Guo int err; 1010457967cdSJunfeng Guo 1011457967cdSJunfeng Guo /* Tell device its resources are being freed */ 1012457967cdSJunfeng Guo if (gve_get_device_resources_ok(priv)) { 1013457967cdSJunfeng Guo err = gve_adminq_deconfigure_device_resources(priv); 1014457967cdSJunfeng Guo if (err) 1015457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not deconfigure device resources: err=%d", err); 1016457967cdSJunfeng Guo } 1017f2a9e162SRushil Gupta 1018f2a9e162SRushil Gupta if (!gve_is_gqi(priv)) { 1019f2a9e162SRushil Gupta rte_free(priv->ptype_lut_dqo); 1020f2a9e162SRushil Gupta priv->ptype_lut_dqo = NULL; 1021f2a9e162SRushil Gupta } 1022457967cdSJunfeng Guo gve_free_counter_array(priv); 1023457967cdSJunfeng Guo gve_free_irq_db(priv); 1024457967cdSJunfeng Guo gve_clear_device_resources_ok(priv); 1025457967cdSJunfeng Guo } 1026457967cdSJunfeng Guo 1027457967cdSJunfeng Guo static int 1028457967cdSJunfeng Guo pci_dev_msix_vec_count(struct rte_pci_device *pdev) 1029457967cdSJunfeng Guo { 1030baa9c550SDavid Marchand off_t msix_pos = rte_pci_find_capability(pdev, RTE_PCI_CAP_ID_MSIX); 1031457967cdSJunfeng Guo uint16_t control; 1032457967cdSJunfeng Guo 1033a10b6e53SDavid Marchand if (msix_pos > 0 && rte_pci_read_config(pdev, &control, sizeof(control), 10347bb1168dSDavid Marchand msix_pos + RTE_PCI_MSIX_FLAGS) == sizeof(control)) 10357bb1168dSDavid Marchand return (control & RTE_PCI_MSIX_FLAGS_QSIZE) + 1; 1036a10b6e53SDavid Marchand 1037a10b6e53SDavid Marchand return 0; 1038457967cdSJunfeng Guo } 1039457967cdSJunfeng Guo 1040457967cdSJunfeng Guo static int 1041457967cdSJunfeng Guo gve_setup_device_resources(struct gve_priv *priv) 1042457967cdSJunfeng Guo { 1043457967cdSJunfeng Guo char z_name[RTE_MEMZONE_NAMESIZE]; 1044457967cdSJunfeng Guo const struct rte_memzone *mz; 1045457967cdSJunfeng Guo int err = 0; 1046457967cdSJunfeng Guo 1047457967cdSJunfeng Guo snprintf(z_name, sizeof(z_name), "gve_%s_cnt_arr", priv->pci_dev->device.name); 1048457967cdSJunfeng Guo mz = rte_memzone_reserve_aligned(z_name, 1049457967cdSJunfeng Guo priv->num_event_counters * sizeof(*priv->cnt_array), 1050457967cdSJunfeng Guo rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG, 1051457967cdSJunfeng Guo PAGE_SIZE); 1052457967cdSJunfeng Guo if (mz == NULL) { 1053457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not alloc memzone for count array"); 1054457967cdSJunfeng Guo return -ENOMEM; 1055457967cdSJunfeng Guo } 1056457967cdSJunfeng Guo priv->cnt_array = (rte_be32_t *)mz->addr; 1057457967cdSJunfeng Guo priv->cnt_array_mz = mz; 1058457967cdSJunfeng Guo 1059457967cdSJunfeng Guo snprintf(z_name, sizeof(z_name), "gve_%s_irqmz", priv->pci_dev->device.name); 1060457967cdSJunfeng Guo mz = rte_memzone_reserve_aligned(z_name, 1061457967cdSJunfeng Guo sizeof(*priv->irq_dbs) * (priv->num_ntfy_blks), 1062457967cdSJunfeng Guo rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG, 1063457967cdSJunfeng Guo PAGE_SIZE); 1064457967cdSJunfeng Guo if (mz == NULL) { 1065457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not alloc memzone for irq_dbs"); 1066457967cdSJunfeng Guo err = -ENOMEM; 1067457967cdSJunfeng Guo goto free_cnt_array; 1068457967cdSJunfeng Guo } 1069457967cdSJunfeng Guo priv->irq_dbs = (struct gve_irq_db *)mz->addr; 1070457967cdSJunfeng Guo priv->irq_dbs_mz = mz; 1071457967cdSJunfeng Guo 1072457967cdSJunfeng Guo err = gve_adminq_configure_device_resources(priv, 1073457967cdSJunfeng Guo priv->cnt_array_mz->iova, 1074457967cdSJunfeng Guo priv->num_event_counters, 1075457967cdSJunfeng Guo priv->irq_dbs_mz->iova, 1076457967cdSJunfeng Guo priv->num_ntfy_blks); 1077457967cdSJunfeng Guo if (unlikely(err)) { 1078457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not config device resources: err=%d", err); 1079457967cdSJunfeng Guo goto free_irq_dbs; 1080457967cdSJunfeng Guo } 1081f2a9e162SRushil Gupta if (!gve_is_gqi(priv)) { 1082f2a9e162SRushil Gupta priv->ptype_lut_dqo = rte_zmalloc("gve_ptype_lut_dqo", 1083f2a9e162SRushil Gupta sizeof(struct gve_ptype_lut), 0); 1084f2a9e162SRushil Gupta if (priv->ptype_lut_dqo == NULL) { 1085f2a9e162SRushil Gupta PMD_DRV_LOG(ERR, "Failed to alloc ptype lut."); 1086f2a9e162SRushil Gupta err = -ENOMEM; 1087f2a9e162SRushil Gupta goto free_irq_dbs; 1088f2a9e162SRushil Gupta } 1089f2a9e162SRushil Gupta err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); 1090f2a9e162SRushil Gupta if (unlikely(err)) { 1091f2a9e162SRushil Gupta PMD_DRV_LOG(ERR, "Failed to get ptype map: err=%d", err); 1092f2a9e162SRushil Gupta goto free_ptype_lut; 1093f2a9e162SRushil Gupta } 1094f2a9e162SRushil Gupta } 1095457967cdSJunfeng Guo 1096f2a9e162SRushil Gupta return 0; 1097f2a9e162SRushil Gupta free_ptype_lut: 1098f2a9e162SRushil Gupta rte_free(priv->ptype_lut_dqo); 1099f2a9e162SRushil Gupta priv->ptype_lut_dqo = NULL; 1100457967cdSJunfeng Guo free_irq_dbs: 1101457967cdSJunfeng Guo gve_free_irq_db(priv); 1102457967cdSJunfeng Guo free_cnt_array: 1103457967cdSJunfeng Guo gve_free_counter_array(priv); 1104457967cdSJunfeng Guo 1105457967cdSJunfeng Guo return err; 1106457967cdSJunfeng Guo } 1107457967cdSJunfeng Guo 1108eb8ec5c3SJoshua Washington static void 1109eb8ec5c3SJoshua Washington gve_set_default_ring_size_bounds(struct gve_priv *priv) 1110eb8ec5c3SJoshua Washington { 1111eb8ec5c3SJoshua Washington priv->max_tx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE; 1112eb8ec5c3SJoshua Washington priv->max_rx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE; 1113eb8ec5c3SJoshua Washington priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE; 1114eb8ec5c3SJoshua Washington priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE; 1115eb8ec5c3SJoshua Washington } 1116eb8ec5c3SJoshua Washington 1117457967cdSJunfeng Guo static int 1118457967cdSJunfeng Guo gve_init_priv(struct gve_priv *priv, bool skip_describe_device) 1119457967cdSJunfeng Guo { 1120457967cdSJunfeng Guo int num_ntfy; 1121457967cdSJunfeng Guo int err; 1122457967cdSJunfeng Guo 1123457967cdSJunfeng Guo /* Set up the adminq */ 1124457967cdSJunfeng Guo err = gve_adminq_alloc(priv); 1125457967cdSJunfeng Guo if (err) { 1126457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to alloc admin queue: err=%d", err); 1127457967cdSJunfeng Guo return err; 1128457967cdSJunfeng Guo } 1129748d0e7fSRushil Gupta err = gve_verify_driver_compatibility(priv); 1130748d0e7fSRushil Gupta if (err) { 1131748d0e7fSRushil Gupta PMD_DRV_LOG(ERR, "Could not verify driver compatibility: err=%d", err); 1132748d0e7fSRushil Gupta goto free_adminq; 1133748d0e7fSRushil Gupta } 1134457967cdSJunfeng Guo 1135eb8ec5c3SJoshua Washington /* Set default descriptor counts */ 1136eb8ec5c3SJoshua Washington gve_set_default_ring_size_bounds(priv); 1137eb8ec5c3SJoshua Washington 1138457967cdSJunfeng Guo if (skip_describe_device) 1139457967cdSJunfeng Guo goto setup_device; 1140457967cdSJunfeng Guo 1141457967cdSJunfeng Guo /* Get the initial information we need from the device */ 1142457967cdSJunfeng Guo err = gve_adminq_describe_device(priv); 1143457967cdSJunfeng Guo if (err) { 1144457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not get device information: err=%d", err); 1145457967cdSJunfeng Guo goto free_adminq; 1146457967cdSJunfeng Guo } 1147457967cdSJunfeng Guo 1148457967cdSJunfeng Guo num_ntfy = pci_dev_msix_vec_count(priv->pci_dev); 1149457967cdSJunfeng Guo if (num_ntfy <= 0) { 1150457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Could not count MSI-x vectors"); 1151457967cdSJunfeng Guo err = -EIO; 1152457967cdSJunfeng Guo goto free_adminq; 1153457967cdSJunfeng Guo } else if (num_ntfy < GVE_MIN_MSIX) { 1154457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "GVE needs at least %d MSI-x vectors, but only has %d", 1155457967cdSJunfeng Guo GVE_MIN_MSIX, num_ntfy); 1156457967cdSJunfeng Guo err = -EINVAL; 1157457967cdSJunfeng Guo goto free_adminq; 1158457967cdSJunfeng Guo } 1159457967cdSJunfeng Guo 1160457967cdSJunfeng Guo priv->num_registered_pages = 0; 1161457967cdSJunfeng Guo 1162457967cdSJunfeng Guo /* gvnic has one Notification Block per MSI-x vector, except for the 1163457967cdSJunfeng Guo * management vector 1164457967cdSJunfeng Guo */ 1165457967cdSJunfeng Guo priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; 1166457967cdSJunfeng Guo priv->mgmt_msix_idx = priv->num_ntfy_blks; 1167457967cdSJunfeng Guo 1168457967cdSJunfeng Guo priv->max_nb_txq = RTE_MIN(priv->max_nb_txq, priv->num_ntfy_blks / 2); 1169457967cdSJunfeng Guo priv->max_nb_rxq = RTE_MIN(priv->max_nb_rxq, priv->num_ntfy_blks / 2); 1170457967cdSJunfeng Guo 1171457967cdSJunfeng Guo if (priv->default_num_queues > 0) { 1172457967cdSJunfeng Guo priv->max_nb_txq = RTE_MIN(priv->default_num_queues, priv->max_nb_txq); 1173457967cdSJunfeng Guo priv->max_nb_rxq = RTE_MIN(priv->default_num_queues, priv->max_nb_rxq); 1174457967cdSJunfeng Guo } 1175457967cdSJunfeng Guo 1176457967cdSJunfeng Guo PMD_DRV_LOG(INFO, "Max TX queues %d, Max RX queues %d", 1177457967cdSJunfeng Guo priv->max_nb_txq, priv->max_nb_rxq); 1178457967cdSJunfeng Guo 1179457967cdSJunfeng Guo setup_device: 1180457967cdSJunfeng Guo err = gve_setup_device_resources(priv); 1181457967cdSJunfeng Guo if (!err) 1182457967cdSJunfeng Guo return 0; 1183457967cdSJunfeng Guo free_adminq: 1184457967cdSJunfeng Guo gve_adminq_free(priv); 1185457967cdSJunfeng Guo return err; 1186457967cdSJunfeng Guo } 1187457967cdSJunfeng Guo 1188457967cdSJunfeng Guo static void 1189457967cdSJunfeng Guo gve_teardown_priv_resources(struct gve_priv *priv) 1190457967cdSJunfeng Guo { 1191457967cdSJunfeng Guo gve_teardown_device_resources(priv); 1192457967cdSJunfeng Guo gve_adminq_free(priv); 1193457967cdSJunfeng Guo } 1194457967cdSJunfeng Guo 1195457967cdSJunfeng Guo static int 1196457967cdSJunfeng Guo gve_dev_init(struct rte_eth_dev *eth_dev) 1197457967cdSJunfeng Guo { 1198457967cdSJunfeng Guo struct gve_priv *priv = eth_dev->data->dev_private; 1199457967cdSJunfeng Guo int max_tx_queues, max_rx_queues; 1200457967cdSJunfeng Guo struct rte_pci_device *pci_dev; 1201457967cdSJunfeng Guo struct gve_registers *reg_bar; 1202457967cdSJunfeng Guo rte_be32_t *db_bar; 1203457967cdSJunfeng Guo int err; 1204457967cdSJunfeng Guo 1205d67bce4bSTathagat Priyadarshi if (rte_eal_process_type() != RTE_PROC_PRIMARY) { 1206d67bce4bSTathagat Priyadarshi if (gve_is_gqi(priv)) { 1207d67bce4bSTathagat Priyadarshi gve_set_rx_function(eth_dev); 1208d67bce4bSTathagat Priyadarshi gve_set_tx_function(eth_dev); 1209d67bce4bSTathagat Priyadarshi eth_dev->dev_ops = &gve_eth_dev_ops; 1210d67bce4bSTathagat Priyadarshi } else { 1211d67bce4bSTathagat Priyadarshi gve_set_rx_function_dqo(eth_dev); 1212d67bce4bSTathagat Priyadarshi gve_set_tx_function_dqo(eth_dev); 1213d67bce4bSTathagat Priyadarshi eth_dev->dev_ops = &gve_eth_dev_ops_dqo; 1214d67bce4bSTathagat Priyadarshi } 1215457967cdSJunfeng Guo return 0; 1216d67bce4bSTathagat Priyadarshi } 1217457967cdSJunfeng Guo 1218457967cdSJunfeng Guo pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 1219457967cdSJunfeng Guo 1220457967cdSJunfeng Guo reg_bar = pci_dev->mem_resource[GVE_REG_BAR].addr; 1221457967cdSJunfeng Guo if (!reg_bar) { 1222457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to map pci bar!"); 1223457967cdSJunfeng Guo return -ENOMEM; 1224457967cdSJunfeng Guo } 1225457967cdSJunfeng Guo 1226457967cdSJunfeng Guo db_bar = pci_dev->mem_resource[GVE_DB_BAR].addr; 1227457967cdSJunfeng Guo if (!db_bar) { 1228457967cdSJunfeng Guo PMD_DRV_LOG(ERR, "Failed to map doorbell bar!"); 1229457967cdSJunfeng Guo return -ENOMEM; 1230457967cdSJunfeng Guo } 1231457967cdSJunfeng Guo 1232457967cdSJunfeng Guo gve_write_version(®_bar->driver_version); 1233457967cdSJunfeng Guo /* Get max queues to alloc etherdev */ 1234457967cdSJunfeng Guo max_tx_queues = ioread32be(®_bar->max_tx_queues); 1235457967cdSJunfeng Guo max_rx_queues = ioread32be(®_bar->max_rx_queues); 1236457967cdSJunfeng Guo 1237457967cdSJunfeng Guo priv->reg_bar0 = reg_bar; 1238457967cdSJunfeng Guo priv->db_bar2 = db_bar; 1239457967cdSJunfeng Guo priv->pci_dev = pci_dev; 1240457967cdSJunfeng Guo priv->state_flags = 0x0; 1241457967cdSJunfeng Guo 1242457967cdSJunfeng Guo priv->max_nb_txq = max_tx_queues; 1243457967cdSJunfeng Guo priv->max_nb_rxq = max_rx_queues; 1244457967cdSJunfeng Guo 1245457967cdSJunfeng Guo err = gve_init_priv(priv, false); 1246457967cdSJunfeng Guo if (err) 1247457967cdSJunfeng Guo return err; 1248457967cdSJunfeng Guo 1249a46583cfSJunfeng Guo if (gve_is_gqi(priv)) { 1250a14d391cSJunfeng Guo eth_dev->dev_ops = &gve_eth_dev_ops; 1251b044845bSJunfeng Guo gve_set_rx_function(eth_dev); 1252b044845bSJunfeng Guo gve_set_tx_function(eth_dev); 1253a46583cfSJunfeng Guo } else { 1254a14d391cSJunfeng Guo eth_dev->dev_ops = &gve_eth_dev_ops_dqo; 1255b044845bSJunfeng Guo gve_set_rx_function_dqo(eth_dev); 1256b044845bSJunfeng Guo gve_set_tx_function_dqo(eth_dev); 1257a46583cfSJunfeng Guo } 1258a46583cfSJunfeng Guo 1259457967cdSJunfeng Guo eth_dev->data->mac_addrs = &priv->dev_addr; 1260457967cdSJunfeng Guo 1261457967cdSJunfeng Guo return 0; 1262457967cdSJunfeng Guo } 1263457967cdSJunfeng Guo 1264457967cdSJunfeng Guo static int 1265457967cdSJunfeng Guo gve_dev_uninit(struct rte_eth_dev *eth_dev) 1266457967cdSJunfeng Guo { 1267457967cdSJunfeng Guo struct gve_priv *priv = eth_dev->data->dev_private; 1268457967cdSJunfeng Guo 1269457967cdSJunfeng Guo gve_teardown_priv_resources(priv); 1270457967cdSJunfeng Guo 1271457967cdSJunfeng Guo eth_dev->data->mac_addrs = NULL; 1272457967cdSJunfeng Guo 1273457967cdSJunfeng Guo return 0; 1274457967cdSJunfeng Guo } 1275457967cdSJunfeng Guo 1276457967cdSJunfeng Guo static int 1277457967cdSJunfeng Guo gve_pci_probe(__rte_unused struct rte_pci_driver *pci_drv, 1278457967cdSJunfeng Guo struct rte_pci_device *pci_dev) 1279457967cdSJunfeng Guo { 1280457967cdSJunfeng Guo return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct gve_priv), gve_dev_init); 1281457967cdSJunfeng Guo } 1282457967cdSJunfeng Guo 1283457967cdSJunfeng Guo static int 1284457967cdSJunfeng Guo gve_pci_remove(struct rte_pci_device *pci_dev) 1285457967cdSJunfeng Guo { 1286457967cdSJunfeng Guo return rte_eth_dev_pci_generic_remove(pci_dev, gve_dev_uninit); 1287457967cdSJunfeng Guo } 1288457967cdSJunfeng Guo 1289457967cdSJunfeng Guo static const struct rte_pci_id pci_id_gve_map[] = { 1290457967cdSJunfeng Guo { RTE_PCI_DEVICE(GOOGLE_VENDOR_ID, GVE_DEV_ID) }, 1291457967cdSJunfeng Guo { .device_id = 0 }, 1292457967cdSJunfeng Guo }; 1293457967cdSJunfeng Guo 1294457967cdSJunfeng Guo static struct rte_pci_driver rte_gve_pmd = { 1295457967cdSJunfeng Guo .id_table = pci_id_gve_map, 1296457967cdSJunfeng Guo .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 1297457967cdSJunfeng Guo .probe = gve_pci_probe, 1298457967cdSJunfeng Guo .remove = gve_pci_remove, 1299457967cdSJunfeng Guo }; 1300457967cdSJunfeng Guo 1301457967cdSJunfeng Guo RTE_PMD_REGISTER_PCI(net_gve, rte_gve_pmd); 1302457967cdSJunfeng Guo RTE_PMD_REGISTER_PCI_TABLE(net_gve, pci_id_gve_map); 1303457967cdSJunfeng Guo RTE_PMD_REGISTER_KMOD_DEP(net_gve, "* igb_uio | vfio-pci"); 1304457967cdSJunfeng Guo RTE_LOG_REGISTER_SUFFIX(gve_logtype_driver, driver, NOTICE); 1305